diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index ee0f3701c114..5bc20d744c5e 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -59,8 +59,6 @@ The following command installs `buildkitd` and `buildctl` to `/usr/local/bin`:
make && sudo make install
```
-You can also use `make binaries-all` to prepare `buildkitd.containerd_only` and `buildkitd.oci_only`.
-
To build containerized `moby/buildkit:local` and `moby/buildkit:local-rootless` images:
```bash
make images
@@ -152,7 +150,7 @@ otherwise cleanup our project.
Register for the Docker Community Slack (dockercommunity.slack.com)
- Click here for an invite to docker community slack .
+ Click here for an invite to docker community slack .
You'll find us in #buildkit
channel, and the #moby-project
channel for general discussions.
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000000..8d77e584d110
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,10 @@
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ open-pull-requests-limit: 10
+ directory: "/"
+ schedule:
+ interval: "daily"
+ labels:
+ - "dependencies"
+ - "bot"
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index f8f3ef95de06..40d60dc762a4 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,5 +1,9 @@
name: build
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
on:
schedule:
- cron: '0 10 * * *' # everyday at 10am
@@ -7,40 +11,46 @@ on:
push:
branches:
- 'master'
+ - 'v[0-9]+.[0-9]+'
tags:
- 'v*'
- 'dockerfile/*'
pull_request:
- branches:
- - 'master'
- - 'v*'
+ paths-ignore:
+ - 'README.md'
+ - 'docs/**'
+ - 'frontend/dockerfile/docs/**'
env:
- REPO_SLUG_ORIGIN: "moby/buildkit:v0.10.0-rc1"
+ REPO_SLUG_ORIGIN: "moby/buildkit:v0.11.0-rc4"
REPO_SLUG_TARGET: "moby/buildkit"
DF_REPO_SLUG_TARGET: "docker/dockerfile-upstream"
PLATFORMS: "linux/amd64,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le,linux/riscv64"
CACHE_GHA_SCOPE_IT: "integration-tests"
CACHE_GHA_SCOPE_BINARIES: "binaries"
CACHE_GHA_SCOPE_CROSS: "cross"
+ TESTFLAGS: "-v --parallel=6 --timeout=30m"
+ BUILDX_VERSION: "v0.10.0-rc3" # leave empty to use the one available on GitHub virtual environment
+ GO_VERSION: "1.19"
jobs:
base:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Expose GitHub Runtime
- uses: crazy-max/ghaction-github-runtime@v1
+ uses: crazy-max/ghaction-github-runtime@v2
-
name: Set up QEMU
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
+ version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
buildkitd-flags: --debug
-
@@ -59,7 +69,7 @@ jobs:
CACHE_TO: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }}
test:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [base]
strategy:
fail-fast: false
@@ -71,7 +81,6 @@ jobs:
- containerd
- containerd-rootless
- containerd-1.5
- - containerd-1.4
- containerd-snapshotter-stargz
- oci
- oci-rootless
@@ -89,23 +98,23 @@ jobs:
steps:
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Expose GitHub Runtime
- uses: crazy-max/ghaction-github-runtime@v1
+ uses: crazy-max/ghaction-github-runtime@v2
-
name: Set up QEMU
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
+ version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
buildkitd-flags: --debug
-
name: Test pkg=${{ matrix.pkg }} ; typ=${{ matrix.typ }} ; skipit=${{ matrix.skip-integration-tests }} ; worker=${{ matrix.worker }}
run: |
- export TESTFLAGS="-v --parallel=6 --timeout=20m"
if [ -n "${{ matrix.worker }}" ]; then
export TESTFLAGS="${TESTFLAGS} --run=//worker=${{ matrix.worker }}$"
fi
@@ -118,37 +127,126 @@ jobs:
CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }}
-
name: Upload coverage file
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: coverage
path: ./coverage
+ test-nydus:
+ runs-on: ubuntu-20.04
+ needs: [base]
+ strategy:
+ fail-fast: false
+ matrix:
+ pkg:
+ - ./client
+ worker:
+ - containerd
+ - oci
+ typ:
+ - integration
+ exclude:
+ - pkg: ./client ./cmd/buildctl ./worker/containerd ./solver ./frontend
+ typ: dockerfile
+ include:
+ - pkg: ./...
+ skip-integration-tests: 1
+ typ: integration
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v3
+ -
+ name: Expose GitHub Runtime
+ uses: crazy-max/ghaction-github-runtime@v2
+ -
+ name: Set up QEMU
+ uses: docker/setup-qemu-action@v2
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+ with:
+ version: ${{ env.BUILDX_VERSION }}
+ driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+ buildkitd-flags: --debug
+ -
+ name: Test pkg=${{ matrix.pkg }} ; typ=${{ matrix.typ }} ; skipit=${{ matrix.skip-integration-tests }} ; worker=${{ matrix.worker }}
+ run: |
+ if [ -n "${{ matrix.worker }}" ]; then
+ export TESTFLAGS="${TESTFLAGS} --tags=nydus --run=//worker=${{ matrix.worker }}$"
+ fi
+ ./hack/test ${{ matrix.typ }}
+ env:
+ BUILDKITD_TAGS: nydus
+ TESTPKGS: ${{ matrix.pkg }}
+ SKIP_INTEGRATION_TESTS: ${{ matrix.skip-integration-tests }}
+ CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }}
+
+ test-s3:
+ runs-on: ubuntu-20.04
+ needs:
+ - base
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v3
+ -
+ name: Expose GitHub Runtime
+ uses: crazy-max/ghaction-github-runtime@v2
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+ with:
+ version: ${{ env.BUILDX_VERSION }}
+ driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+ buildkitd-flags: --debug
+ -
+ name: Test
+ run: |
+ hack/s3_test/run_test.sh
+
+ test-azblob:
+ runs-on: ubuntu-20.04
+ needs:
+ - base
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v3
+ -
+ name: Expose GitHub Runtime
+ uses: crazy-max/ghaction-github-runtime@v2
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+ with:
+ version: ${{ env.BUILDX_VERSION }}
+ driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+ buildkitd-flags: --debug
+ -
+ name: Test
+ run: |
+ hack/azblob_test/run_test.sh
+
test-os:
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os:
- # - ubuntu-latest
- # - macOS-latest
- - windows-latest
+ # - ubuntu-20.04
+ # - macOS-11
+ - windows-2022
steps:
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Set up Go
- uses: actions/setup-go@v2
- with:
- go-version: 1.16
- -
- name: Cache Go modules
- uses: actions/cache@v2
+ uses: actions/setup-go@v3
with:
- path: ~/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
+ go-version: ${{ env.GO_VERSION }}
+ cache: true
-
name: Go mod
run: |
@@ -160,31 +258,31 @@ jobs:
SKIP_INTEGRATION_TESTS: 1
run: |
mkdir -p ./coverage
- go test -coverprofile=./coverage/coverage-${{ github.job }}-${{ matrix.os }}.txt -covermode=atomic ./...
+ go test -coverprofile=./coverage/coverage-${{ github.job }}-${{ matrix.os }}.txt -covermode=atomic ${TESTFLAGS} ./...
shell: bash
-
name: Upload coverage file
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: coverage
path: ./coverage
upload-coverage:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [test, test-os]
steps:
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Download coverage files
- uses: actions/download-artifact@v2
+ uses: actions/download-artifact@v3
with:
name: coverage
path: ./coverage
-
name: List coverage files
- uses: actions/github-script@v3
+ uses: actions/github-script@v6
id: files
with:
result-encoding: string
@@ -195,26 +293,27 @@ jobs:
.join(',');
-
name: Send to Codecov
- uses: codecov/codecov-action@v2
+ uses: codecov/codecov-action@v3
with:
files: ${{ steps.files.outputs.result }}
cross:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
steps:
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Expose GitHub Runtime
- uses: crazy-max/ghaction-github-runtime@v1
+ uses: crazy-max/ghaction-github-runtime@v2
-
name: Set up QEMU
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
+ version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
buildkitd-flags: --debug
-
@@ -228,7 +327,7 @@ jobs:
CACHE_TO: type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }}
release-base:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
outputs:
tag: ${{ steps.prep.outputs.tag }}
push: ${{ steps.prep.outputs.push }}
@@ -246,13 +345,15 @@ jobs:
PUSH=push
elif [[ $GITHUB_REF == refs/heads/* ]]; then
TAG=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
- PUSH=push
+ if [ $GITHUB_REF = "refs/heads/${{ github.event.repository.default_branch }}" ]; then
+ PUSH=push
+ fi
fi
- echo ::set-output name=tag::${TAG}
- echo ::set-output name=push::${PUSH}
+ echo "tag=${TAG}" >>${GITHUB_OUTPUT}
+ echo "push=${PUSH}" >>${GITHUB_OUTPUT}
image:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [release-base, test, cross]
strategy:
fail-fast: false
@@ -263,23 +364,24 @@ jobs:
steps:
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Expose GitHub Runtime
- uses: crazy-max/ghaction-github-runtime@v1
+ uses: crazy-max/ghaction-github-runtime@v2
-
name: Set up QEMU
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
+ version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
buildkitd-flags: --debug
-
name: Login to DockerHub
if: needs.release-base.outputs.push == 'push'
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
@@ -288,27 +390,29 @@ jobs:
run: |
./hack/images "${{ needs.release-base.outputs.tag }}" "$REPO_SLUG_TARGET" "${{ needs.release-base.outputs.push }}"
env:
+ RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }}
TARGET: ${{ matrix.target-stage }}
CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }} type=gha,scope=image${{ matrix.target-stage }}
CACHE_TO: type=gha,scope=image${{ matrix.target-stage }}
binaries:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
needs: [release-base, test, cross]
steps:
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Expose GitHub Runtime
- uses: crazy-max/ghaction-github-runtime@v1
+ uses: crazy-max/ghaction-github-runtime@v2
-
name: Set up QEMU
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
+ version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
buildkitd-flags: --debug
-
@@ -316,15 +420,12 @@ jobs:
run: |
./hack/release-tar "${{ needs.release-base.outputs.tag }}" release-out
env:
+ RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }}
PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }}
- -
- name: Move artifacts
- run: |
- mv ./release-out/**/* ./release-out/
-
name: Upload artifacts
- uses: actions/upload-artifact@v2
+ uses: actions/upload-artifact@v3
with:
name: buildkit
path: ./release-out/*
@@ -332,7 +433,7 @@ jobs:
-
name: GitHub Release
if: startsWith(github.ref, 'refs/tags/v')
- uses: softprops/action-gh-release@v1
+ uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
@@ -341,12 +442,12 @@ jobs:
name: ${{ needs.release-base.outputs.tag }}
frontend-base:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
if: github.event_name != 'schedule'
outputs:
typ: ${{ steps.prep.outputs.typ }}
- tag: ${{ steps.prep.outputs.tag }}
push: ${{ steps.prep.outputs.push }}
+ matrix: ${{ steps.prep.outputs.matrix }}
steps:
-
name: Prepare
@@ -359,51 +460,63 @@ jobs:
TYP=tag
TAG=${GITHUB_REF#refs/tags/}
PUSH=push
- elif [[ $GITHUB_REF == refs/heads/* ]]; then
+ elif [ $GITHUB_REF = "refs/heads/${{ github.event.repository.default_branch }}" ]; then
PUSH=push
fi
- echo ::set-output name=typ::${TYP}
- echo ::set-output name=tag::${TAG}
- echo ::set-output name=push::${PUSH}
+ echo "typ=${TYP}" >>${GITHUB_OUTPUT}
+ echo "push=${PUSH}" >>${GITHUB_OUTPUT}
+ if [ "${TYP}" = "master" ]; then
+ echo "matrix=$(jq -cn --arg tag "$TAG" '[$tag, "labs"]')" >>${GITHUB_OUTPUT}
+ else
+ echo "matrix=$(jq -cn --arg tag "$TAG" '[$tag]')" >>${GITHUB_OUTPUT}
+ fi
frontend-image:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
if: github.event_name != 'schedule'
needs: [frontend-base, test]
+ strategy:
+ fail-fast: false
+ matrix:
+ tag: ${{ fromJson(needs.frontend-base.outputs.matrix) }}
steps:
+ -
+ name: Prepare
+ run: |
+ if [ "${{ matrix.tag }}" = "labs" ]; then
+ echo "CACHE_SCOPE=frontend-labs" >>${GITHUB_ENV}
+ else
+ echo "CACHE_SCOPE=frontend-mainline" >>${GITHUB_ENV}
+ fi
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Expose GitHub Runtime
- uses: crazy-max/ghaction-github-runtime@v1
+ uses: crazy-max/ghaction-github-runtime@v2
-
name: Set up QEMU
- uses: docker/setup-qemu-action@v1
+ uses: docker/setup-qemu-action@v2
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
+ version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
buildkitd-flags: --debug
-
name: Login to DockerHub
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
if: needs.frontend-base.outputs.push == 'push'
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
- name: Build ${{ needs.frontend-base.outputs.typ }}/${{ needs.frontend-base.outputs.tag }}
- run: |
- ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" "${{ needs.frontend-base.outputs.tag }}" "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}"
- env:
- CACHE_FROM: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }}
- CACHE_TO: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }}
- -
- name: Build ${{ needs.frontend-base.outputs.typ }}/labs
- if: needs.frontend-base.outputs.typ == 'master'
+ name: Build
run: |
- ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" labs "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}"
+ ./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" "${{ matrix.tag }}" "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}"
env:
- CACHE_FROM: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }}
+ RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }}
+ PLATFORMS: ${{ env.PLATFORMS }},linux/386,linux/mips,linux/mipsle,linux/mips64,linux/mips64le
+ CACHE_FROM: type=gha,scope=${{ env.CACHE_SCOPE }}
+ CACHE_TO: type=gha,scope=${{ env.CACHE_SCOPE }}
diff --git a/.github/workflows/buildx-image.yml b/.github/workflows/buildx-image.yml
index e97ca41c579c..d9c655480f06 100644
--- a/.github/workflows/buildx-image.yml
+++ b/.github/workflows/buildx-image.yml
@@ -9,6 +9,10 @@
# moby/buildkit:v0.8.1-rootless > moby/buildkit:buildx-stable-1-rootless
name: buildx-image
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
on:
workflow_dispatch:
inputs:
@@ -27,10 +31,11 @@ on:
env:
REPO_SLUG_TARGET: "moby/buildkit"
+ BUILDX_VERSION: "v0.9.1" # leave empty to use the one available on GitHub virtual environment
jobs:
create:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
@@ -40,11 +45,14 @@ jobs:
steps:
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
+ with:
+ version: ${{ env.BUILDX_VERSION }}
+ buildkitd-flags: --debug
-
name: Login to DockerHub
if: github.event.inputs.dry-run != 'true'
- uses: docker/login-action@v1
+ uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
diff --git a/.github/workflows/dockerd.yml b/.github/workflows/dockerd.yml
new file mode 100644
index 000000000000..436288dc0846
--- /dev/null
+++ b/.github/workflows/dockerd.yml
@@ -0,0 +1,139 @@
+name: dockerd
+
+on:
+ # TODO: add event to build on command in PR (e.g., /test-dockerd)
+ workflow_dispatch:
+ inputs:
+ version:
+ description: 'Docker version'
+ required: true
+ default: '20.10.19'
+
+env:
+ REPO_SLUG_ORIGIN: "moby/buildkit:latest"
+ CACHE_GHA_SCOPE_IT: "integration-tests"
+ CACHE_GHA_SCOPE_BINARIES: "binaries"
+ TESTFLAGS: "-v --parallel=1 --timeout=30m"
+ BUILDX_VERSION: "v0.9.1" # leave empty to use the one available on GitHub virtual environment
+
+jobs:
+ prepare:
+ runs-on: ubuntu-20.04
+ steps:
+ -
+ name: Check version
+ run: |
+ version=${{ github.event.inputs.version }}
+ if [ -z "$version" ]; then
+ version=20.10.19
+ fi
+ echo "DOCKER_VERSION=$version" >> $GITHUB_ENV
+ -
+ name: Check build
+ uses: actions/github-script@v6
+ id: build
+ with:
+ result-encoding: string
+ script: |
+ try {
+ new URL("${{ env.DOCKER_VERSION }}");
+ } catch (e) {
+ return false;
+ }
+ return true;
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+ with:
+ version: ${{ env.BUILDX_VERSION }}
+ driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+ buildkitd-flags: --debug
+ -
+ name: Build
+ if: steps.build.outputs.result == 'true'
+ uses: docker/build-push-action@v3
+ with:
+ context: ${{ env.DOCKER_VERSION }}
+ target: binary
+ outputs: /tmp/moby
+ -
+ name: Rename binary
+ if: steps.build.outputs.result == 'true'
+ run: |
+ if [ -L "/tmp/moby/binary-daemon/dockerd" ]; then
+ mv -f $(readlink /tmp/moby/binary-daemon/dockerd) /tmp/moby/dockerd
+ fi
+ -
+ name: Download
+ if: steps.build.outputs.result != 'true'
+ run: |
+ mkdir -p /tmp/moby
+ cd /tmp/moby
+ wget -qO- "https://download.docker.com/linux/static/stable/x86_64/docker-${{ env.DOCKER_VERSION }}.tgz" | tar xvz --strip 1
+ -
+ name: Upload dockerd
+ uses: actions/upload-artifact@v3
+ with:
+ name: dockerd
+ path: /tmp/moby/dockerd
+ if-no-files-found: error
+
+ test:
+ runs-on: ubuntu-20.04
+ needs:
+ - prepare
+ strategy:
+ fail-fast: false
+ matrix:
+ worker:
+ - dockerd
+ - dockerd-containerd
+ pkg:
+ - ./client
+ - ./cmd/buildctl
+ - ./solver
+ - ./frontend
+ - ./frontend/dockerfile
+ typ:
+ - integration
+ include:
+ - pkg: ./...
+ skip-integration-tests: 1
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v3
+ -
+ name: Expose GitHub Runtime
+ uses: crazy-max/ghaction-github-runtime@v2
+ -
+ name: Set up QEMU
+ uses: docker/setup-qemu-action@v2
+ -
+ name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v2
+ with:
+ version: ${{ env.BUILDX_VERSION }}
+ driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+ buildkitd-flags: --debug
+ -
+ name: Download dockerd
+ uses: actions/download-artifact@v3
+ with:
+ name: dockerd
+ path: ./build/
+ -
+ name: Fix dockerd perms
+ run: |
+ chmod +x ./build/dockerd
+ -
+ name: Test
+ run: |
+ ./hack/test ${{ matrix.typ }}
+ env:
+ TEST_DOCKERD: "1"
+ TEST_DOCKERD_BINARY: "./build/dockerd"
+ TESTPKGS: "${{ matrix.pkg }}"
+ TESTFLAGS: "${{ env.TESTFLAGS }} --run=//worker=${{ matrix.worker }}$"
+ SKIP_INTEGRATION_TESTS: "${{ matrix.skip-integration-tests }}"
+ CACHE_FROM: "type=gha,scope=${{ env.CACHE_GHA_SCOPE_IT }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }}"
diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml
index ba5d757b7fb2..21bdc61939e0 100644
--- a/.github/workflows/validate.yml
+++ b/.github/workflows/validate.yml
@@ -1,24 +1,27 @@
name: validate
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
on:
workflow_dispatch:
push:
branches:
- 'master'
+ - 'v[0-9]+.[0-9]+'
tags:
- 'v*'
- 'dockerfile/*'
pull_request:
- branches:
- - 'master'
- - 'v*'
env:
REPO_SLUG_ORIGIN: "moby/buildkit:latest"
+ BUILDX_VERSION: "v0.9.1" # leave empty to use the one available on GitHub virtual environment
jobs:
validate:
- runs-on: ubuntu-latest
+ runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
@@ -30,17 +33,15 @@ jobs:
steps:
-
name: Checkout
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
-
name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v1
+ uses: docker/setup-buildx-action@v2
with:
+ version: ${{ env.BUILDX_VERSION }}
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
+ buildkitd-flags: --debug
-
name: Run
run: |
${{ matrix.script }}
- -
- name: Dump context
- if: always()
- uses: crazy-max/ghaction-dump-context@v1
diff --git a/.gitignore b/.gitignore
index 5b74bfefa9f7..75c0a9be9885 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,6 @@
+# BuildKit project generated files to ignore
+# if you want to ignore files created by your editor/tools,
+# please consider a global .gitignore https://help.github.com/articles/ignoring-files
bin
coverage
release-out
diff --git a/.golangci.yml b/.golangci.yml
index 28911415b7d0..2917d8c47aaf 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -7,19 +7,22 @@ run:
build-tags:
- dfrunsecurity
+ - dfaddgit
+ - dfaddchecksum
linters:
enable:
- deadcode
+ - depguard
- gofmt
- goimports
+ - gosimple
- revive
- govet
- importas
- ineffassign
- misspell
- staticcheck
- - structcheck
- typecheck
- unused
- varcheck
@@ -27,9 +30,22 @@ linters:
- errname
- makezero
- whitespace
+ - nolintlint
+ - gosec
+ - forbidigo
disable-all: true
linters-settings:
+ depguard:
+ list-type: blacklist
+ include-go-root: true
+ packages:
+ # The io/ioutil package has been deprecated.
+ # https://go.dev/doc/go1.16#ioutil
+ - io/ioutil
+ forbidigo:
+ forbid:
+ - '^fmt\.Errorf(# use errors\.Errorf instead)?$'
importas:
alias:
- pkg: "github.com/opencontainers/image-spec/specs-go/v1"
@@ -37,9 +53,20 @@ linters-settings:
- pkg: "github.com/opencontainers/go-digest"
alias: "digest"
no-unaliased: true
+ gosec:
+ excludes:
+ - G101 # Potential hardcoded credentials (false positives)
+ - G402 # TLS MinVersion too low
+ - G601 # Implicit memory aliasing in for loop (false positives)
+ - G504 # Import blocklist: net/http/cgi
+ config:
+ G306: "0644"
issues:
exclude-rules:
- linters:
- revive
text: "stutters"
+ - linters:
+ - staticcheck
+ text: "SA1019: .*Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md"
diff --git a/Dockerfile b/Dockerfile
index 14e9bd0e6d8d..b64f57bd8358 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,29 +1,39 @@
# syntax=docker/dockerfile-upstream:master
-ARG RUNC_VERSION=v1.0.2
-ARG CONTAINERD_VERSION=v1.6.1
+ARG RUNC_VERSION=v1.1.4
+ARG CONTAINERD_VERSION=v1.6.18
# containerd v1.5 for integration tests
-ARG CONTAINERD_ALT_VERSION_15=v1.5.10
-# containerd v1.4 for integration tests
-ARG CONTAINERD_ALT_VERSION_14=v1.4.13
-# available targets: buildkitd, buildkitd.oci_only, buildkitd.containerd_only
-ARG BUILDKIT_TARGET=buildkitd
+ARG CONTAINERD_ALT_VERSION_15=v1.5.18
ARG REGISTRY_VERSION=2.8.0
-ARG ROOTLESSKIT_VERSION=v0.14.6
-ARG CNI_VERSION=v1.1.0
-ARG STARGZ_SNAPSHOTTER_VERSION=v0.11.2
+ARG ROOTLESSKIT_VERSION=v1.0.1
+ARG CNI_VERSION=v1.1.1
+ARG STARGZ_SNAPSHOTTER_VERSION=v0.13.0
+ARG NERDCTL_VERSION=v1.0.0
+ARG DNSNAME_VERSION=v1.3.1
+ARG NYDUS_VERSION=v2.1.0
+
+ARG ALPINE_VERSION=3.17
+
+# alpine base for buildkit image
+# TODO: remove this when alpine image supports riscv64
+FROM alpine:${ALPINE_VERSION} AS alpine-amd64
+FROM alpine:${ALPINE_VERSION} AS alpine-arm
+FROM alpine:${ALPINE_VERSION} AS alpine-arm64
+FROM alpine:${ALPINE_VERSION} AS alpine-s390x
+FROM alpine:${ALPINE_VERSION} AS alpine-ppc64le
+FROM alpine:edge@sha256:c223f84e05c23c0571ce8decefef818864869187e1a3ea47719412e205c8c64e AS alpine-riscv64
+FROM alpine-$TARGETARCH AS alpinebase
-ARG ALPINE_VERSION=3.15
+# xx is a helper for cross-compilation
+FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.2.1 AS xx
+
+# go base image
+FROM --platform=$BUILDPLATFORM golang:1.19-alpine${ALPINE_VERSION} AS golatest
# git stage is used for checking out remote repository sources
FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS git
RUN apk add --no-cache git
-# xx is a helper for cross-compilation
-FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:1e96844fadaa2f9aea021b2b05299bc02fe4c39a92d8e735b93e8e2b15610128 AS xx
-
-FROM --platform=$BUILDPLATFORM golang:1.17-alpine AS golatest
-
# gobuild is base stage for compiling go/cgo
FROM golatest AS gobuild-base
RUN apk add --no-cache file bash clang lld pkgconfig git make
@@ -48,6 +58,15 @@ RUN --mount=from=runc-src,src=/usr/src/runc,target=. --mount=target=/root/.cache
CGO_ENABLED=1 xx-go build -mod=vendor -ldflags '-extldflags -static' -tags 'apparmor seccomp netgo cgo static_build osusergo' -o /usr/bin/runc ./ && \
xx-verify --static /usr/bin/runc
+# dnsname CNI plugin for testing
+FROM gobuild-base AS dnsname
+ARG DNSNAME_VERSION
+WORKDIR /go/dnsname
+RUN git clone https://github.com/containers/dnsname.git . \
+ && git checkout -q "$DNSNAME_VERSION"
+RUN --mount=target=/root/.cache,type=cache \
+ set -e; make binaries; mv bin/dnsname /usr/bin/dnsname
+
FROM gobuild-base AS buildkit-base
WORKDIR /src
ENV GOFLAGS=-mod=vendor
@@ -72,6 +91,7 @@ RUN --mount=target=. --mount=target=/root/.cache,type=cache \
# build buildkitd binary
FROM buildkit-base AS buildkitd
+# BUILDKITD_TAGS defines additional Go build tags for compiling buildkitd
ARG BUILDKITD_TAGS
ARG TARGETPLATFORM
RUN --mount=target=. --mount=target=/root/.cache,type=cache \
@@ -82,8 +102,9 @@ RUN --mount=target=. --mount=target=/root/.cache,type=cache \
FROM scratch AS binaries-linux-helper
COPY --link --from=runc /usr/bin/runc /buildkit-runc
-# built from https://github.com/tonistiigi/binfmt/releases/tag/buildkit%2Fv6.2.0-24
-COPY --link --from=tonistiigi/binfmt:buildkit@sha256:ea7632b4e0b2406db438730c604339b38c23ac51a2f73c89ba50abe5e2146b4b / /
+# built from https://github.com/tonistiigi/binfmt/releases/tag/buildkit%2Fv7.1.0-30
+COPY --link --from=tonistiigi/binfmt:buildkit-v7.1.0-30@sha256:45dd57b4ba2f24e2354f71f1e4e51f073cb7a28fd848ce6f5f2a7701142a6bf0 / /
+
FROM binaries-linux-helper AS binaries-linux
COPY --link --from=buildctl /usr/bin/buildctl /
COPY --link --from=buildkitd /usr/bin/buildkitd /
@@ -95,6 +116,8 @@ FROM scratch AS binaries-windows
COPY --link --from=buildctl /usr/bin/buildctl /buildctl.exe
FROM binaries-$TARGETOS AS binaries
+# enable scanning for this stage
+ARG BUILDKIT_SBOM_SCAN_STAGE=true
FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS releaser
RUN apk add --no-cache tar gzip
@@ -107,8 +130,7 @@ RUN --mount=from=binaries \
FROM scratch AS release
COPY --link --from=releaser /out/ /
-# tonistiigi/alpine supports riscv64
-FROM tonistiigi/alpine:${ALPINE_VERSION} AS buildkit-export
+FROM alpinebase AS buildkit-export
RUN apk add --no-cache fuse3 git openssh pigz xz \
&& ln -s fusermount3 /usr/bin/fusermount
COPY --link examples/buildctl-daemonless/buildctl-daemonless.sh /usr/bin/
@@ -123,7 +145,7 @@ RUN git clone https://github.com/containerd/containerd.git containerd
FROM gobuild-base AS containerd-base
WORKDIR /go/src/github.com/containerd/containerd
ARG TARGETPLATFORM
-ENV CGO_ENABLED=1 BUILDTAGS=no_btrfs
+ENV CGO_ENABLED=1 BUILDTAGS=no_btrfs GO111MODULE=off
RUN xx-apk add musl-dev gcc && xx-go --wrap
FROM containerd-base AS containerd
@@ -139,7 +161,6 @@ RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target
# containerd v1.5 for integration tests
FROM containerd-base as containerd-alt-15
ARG CONTAINERD_ALT_VERSION_15
-ARG GO111MODULE=off
RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target=/root/.cache,type=cache \
git fetch origin \
&& git checkout -q "$CONTAINERD_ALT_VERSION_15" \
@@ -147,17 +168,6 @@ RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target
&& make bin/containerd-shim-runc-v2 \
&& mv bin /out
-# containerd v1.4 for integration tests
-FROM containerd-base as containerd-alt-14
-ARG CONTAINERD_ALT_VERSION_14
-ARG GO111MODULE=off
-RUN --mount=from=containerd-src,src=/usr/src/containerd,readwrite --mount=target=/root/.cache,type=cache \
- git fetch origin \
- && git checkout -q "$CONTAINERD_ALT_VERSION_14" \
- && make bin/containerd \
- && make bin/containerd-shim-runc-v2 \
- && mv bin /out
-
ARG REGISTRY_VERSION
FROM registry:$REGISTRY_VERSION AS registry
@@ -183,39 +193,24 @@ RUN --mount=target=/root/.cache,type=cache \
xx-verify --static /out/containerd-stargz-grpc && \
xx-verify --static /out/ctr-remote
-# Copy together all binaries needed for oci worker mode
-FROM buildkit-export AS buildkit-buildkitd.oci_only
-COPY --link --from=buildkitd.oci_only /usr/bin/buildkitd.oci_only /usr/bin/
-COPY --link --from=buildctl /usr/bin/buildctl /usr/bin/
-ENTRYPOINT ["buildkitd.oci_only"]
-
-# Copy together all binaries for containerd worker mode
-FROM buildkit-export AS buildkit-buildkitd.containerd_only
-COPY --link --from=buildkitd.containerd_only /usr/bin/buildkitd.containerd_only /usr/bin/
-COPY --link --from=buildctl /usr/bin/buildctl /usr/bin/
-ENTRYPOINT ["buildkitd.containerd_only"]
+FROM gobuild-base AS nydus
+ARG NYDUS_VERSION
+ARG TARGETOS
+ARG TARGETARCH
+SHELL ["/bin/bash", "-c"]
+RUN wget https://github.com/dragonflyoss/image-service/releases/download/$NYDUS_VERSION/nydus-static-$NYDUS_VERSION-$TARGETOS-$TARGETARCH.tgz
+RUN mkdir -p /out/nydus-static && tar xzvf nydus-static-$NYDUS_VERSION-$TARGETOS-$TARGETARCH.tgz -C /out
-# Copy together all binaries for oci+containerd mode
-FROM buildkit-export AS buildkit-buildkitd-linux
+FROM buildkit-export AS buildkit-linux
COPY --link --from=binaries / /usr/bin/
ENTRYPOINT ["buildkitd"]
-FROM binaries AS buildkit-buildkitd-darwin
+FROM binaries AS buildkit-darwin
-FROM binaries AS buildkit-buildkitd-windows
+FROM binaries AS buildkit-windows
# this is not in binaries-windows because it is not intended for release yet, just CI
COPY --link --from=buildkitd /usr/bin/buildkitd /buildkitd.exe
-FROM buildkit-buildkitd-$TARGETOS AS buildkit-buildkitd
-
-FROM alpine:${ALPINE_VERSION} AS containerd-runtime
-COPY --link --from=runc /usr/bin/runc /usr/bin/
-COPY --link --from=containerd /out/containerd* /usr/bin/
-COPY --link --from=containerd /out/ctr /usr/bin/
-VOLUME /var/lib/containerd
-VOLUME /run/containerd
-ENTRYPOINT ["containerd"]
-
FROM --platform=$BUILDPLATFORM alpine:${ALPINE_VERSION} AS cni-plugins
RUN apk add --no-cache curl
ARG CNI_VERSION
@@ -223,31 +218,41 @@ ARG TARGETOS
ARG TARGETARCH
WORKDIR /opt/cni/bin
RUN curl -Ls https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-$TARGETOS-$TARGETARCH-$CNI_VERSION.tgz | tar xzv
+COPY --link --from=dnsname /usr/bin/dnsname /opt/cni/bin/
FROM buildkit-base AS integration-tests-base
ENV BUILDKIT_INTEGRATION_ROOTLESS_IDPAIR="1000:1000"
-RUN apk add --no-cache shadow shadow-uidmap sudo vim iptables fuse \
+ARG NERDCTL_VERSION
+RUN apk add --no-cache shadow shadow-uidmap sudo vim iptables ip6tables dnsmasq fuse curl git-daemon \
&& useradd --create-home --home-dir /home/user --uid 1000 -s /bin/sh user \
&& echo "XDG_RUNTIME_DIR=/run/user/1000; export XDG_RUNTIME_DIR" >> /home/user/.profile \
&& mkdir -m 0700 -p /run/user/1000 \
&& chown -R user /run/user/1000 /home/user \
&& ln -s /sbin/iptables-legacy /usr/bin/iptables \
- && xx-go --wrap
+ && xx-go --wrap \
+ && curl -Ls https://raw.githubusercontent.com/containerd/nerdctl/$NERDCTL_VERSION/extras/rootless/containerd-rootless.sh > /usr/bin/containerd-rootless.sh \
+ && chmod 0755 /usr/bin/containerd-rootless.sh
+# The entrypoint script is needed for enabling nested cgroup v2 (https://github.com/moby/buildkit/issues/3265#issuecomment-1309631736)
+RUN curl -Ls https://raw.githubusercontent.com/moby/moby/v20.10.21/hack/dind > /docker-entrypoint.sh \
+ && chmod 0755 /docker-entrypoint.sh
+ENTRYPOINT ["/docker-entrypoint.sh"]
# musl is needed to directly use the registry binary that is built on alpine
-ENV BUILDKIT_INTEGRATION_CONTAINERD_EXTRA="containerd-1.4=/opt/containerd-alt-14/bin,containerd-1.5=/opt/containerd-alt-15/bin"
+ENV BUILDKIT_INTEGRATION_CONTAINERD_EXTRA="containerd-1.5=/opt/containerd-alt-15/bin"
ENV BUILDKIT_INTEGRATION_SNAPSHOTTER=stargz
ENV CGO_ENABLED=0
+COPY --link --from=nydus /out/nydus-static/* /usr/bin/
COPY --link --from=stargz-snapshotter /out/* /usr/bin/
COPY --link --from=rootlesskit /rootlesskit /usr/bin/
-COPY --link --from=containerd-alt-14 /out/containerd* /opt/containerd-alt-14/bin/
COPY --link --from=containerd-alt-15 /out/containerd* /opt/containerd-alt-15/bin/
COPY --link --from=registry /bin/registry /usr/bin/
COPY --link --from=runc /usr/bin/runc /usr/bin/
COPY --link --from=containerd /out/containerd* /usr/bin/
-COPY --link --from=cni-plugins /opt/cni/bin/bridge /opt/cni/bin/host-local /opt/cni/bin/loopback /opt/cni/bin/
+COPY --link --from=cni-plugins /opt/cni/bin/bridge /opt/cni/bin/host-local /opt/cni/bin/loopback /opt/cni/bin/firewall /opt/cni/bin/dnsname /opt/cni/bin/
COPY --link hack/fixtures/cni.json /etc/buildkit/cni.json
+COPY --link hack/fixtures/dns-cni.conflist /etc/buildkit/dns-cni.conflist
COPY --link --from=binaries / /usr/bin/
+# integration-tests prepares an image suitable for running all tests
FROM integration-tests-base AS integration-tests
COPY . .
ENV BUILDKIT_RUN_NETWORK_INTEGRATION_TESTS=1 BUILDKIT_CNI_INIT_LOCK_PATH=/run/buildkit_cni_bridge.lock
@@ -256,7 +261,7 @@ FROM integration-tests AS dev-env
VOLUME /var/lib/buildkit
# Rootless mode.
-FROM tonistiigi/alpine:${ALPINE_VERSION} AS rootless
+FROM alpinebase AS rootless
RUN apk add --no-cache fuse3 fuse-overlayfs git openssh pigz shadow-uidmap xz
RUN adduser -D -u 1000 user \
&& mkdir -p /run/user/1000 /home/user/.local/tmp /home/user/.local/share/buildkit \
@@ -275,7 +280,5 @@ ENV BUILDKIT_HOST=unix:///run/user/1000/buildkit/buildkitd.sock
VOLUME /home/user/.local/share/buildkit
ENTRYPOINT ["rootlesskit", "buildkitd"]
-
-FROM buildkit-${BUILDKIT_TARGET}
-
-
+# buildkit builds the buildkit container image
+FROM buildkit-$TARGETOS AS buildkit
diff --git a/MAINTAINERS b/MAINTAINERS
index b35691790d0c..d8a8221b4e08 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -155,6 +155,7 @@ made through a pull request.
"crazy-max",
"hinshun",
"ijc",
+ "jedevc",
"ktock",
"sipsma",
"tiborvass",
@@ -208,6 +209,11 @@ made through a pull request.
Email = "ian.campbell@docker.com"
GitHub = "ijc"
+ [people.jedevc]
+ Name = "Justin Chadwell"
+ Email = "me@jedevc.com"
+ GitHub = "jedevc"
+
[people.ktock]
Name = "Kohei Tokunaga"
Email = "ktokunaga.mail@gmail.com"
diff --git a/README.md b/README.md
index ebea076eb16b..c295a095819d 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
[![asciicinema example](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU.png)](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU)
-# BuildKit
+# BuildKit
[![GoDoc](https://godoc.org/github.com/moby/buildkit?status.svg)](https://godoc.org/github.com/moby/buildkit/client/llb)
[![Build Status](https://github.com/moby/buildkit/workflows/build/badge.svg)](https://github.com/moby/buildkit/actions?query=workflow%3Abuild)
@@ -26,12 +26,18 @@ Read the proposal from https://github.com/moby/moby/issues/32925
Introductory blog post https://blog.mobyproject.org/introducing-buildkit-17e056cc5317
-Join `#buildkit` channel on [Docker Community Slack](http://dockr.ly/slack)
+Join `#buildkit` channel on [Docker Community Slack](https://dockr.ly/comm-slack)
-:information_source: If you are visiting this repo for the usage of BuildKit-only Dockerfile features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`, please refer to [`frontend/dockerfile/docs/syntax.md`](frontend/dockerfile/docs/syntax.md).
+> **Note**
+>
+> If you are visiting this repo for the usage of BuildKit-only Dockerfile features
+> like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`, please refer to [`frontend/dockerfile/docs/reference.md`](frontend/dockerfile/docs/reference.md)
-:information_source: [BuildKit has been integrated to `docker build` since Docker 18.06 .](https://docs.docker.com/develop/develop-images/build_enhancements/)
-You don't need to read this document unless you want to use the full-featured standalone version of BuildKit.
+> **Note**
+>
+> [BuildKit has been integrated to `docker build` since Docker 18.09](https://docs.docker.com/develop/develop-images/build_enhancements/).
+> You don't need to read this document unless you want to use the full-featured
+> standalone version of BuildKit.
@@ -39,12 +45,11 @@ You don't need to read this document unless you want to use the full-featured st
- [Used by](#used-by)
- [Quick start](#quick-start)
- - [Starting the `buildkitd` daemon:](#starting-the-buildkitd-daemon)
+ - [Starting the `buildkitd` daemon](#starting-the-buildkitd-daemon)
- [Exploring LLB](#exploring-llb)
- [Exploring Dockerfiles](#exploring-dockerfiles)
- [Building a Dockerfile with `buildctl`](#building-a-dockerfile-with-buildctl)
- - [Building a Dockerfile using external frontend:](#building-a-dockerfile-using-external-frontend)
- - [Building a Dockerfile with experimental features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`](#building-a-dockerfile-with-experimental-features-like-run---mounttypebindcachetmpfssecretssh)
+ - [Building a Dockerfile using external frontend](#building-a-dockerfile-using-external-frontend)
- [Output](#output)
- [Image/Registry](#imageregistry)
- [Local directory](#local-directory)
@@ -58,6 +63,8 @@ You don't need to read this document unless you want to use the full-featured st
- [Registry (push image and cache separately)](#registry-push-image-and-cache-separately)
- [Local directory](#local-directory-1)
- [GitHub Actions cache (experimental)](#github-actions-cache-experimental)
+ - [S3 cache (experimental)](#s3-cache-experimental)
+ - [Azure Blob Storage cache (experimental)](#azure-blob-storage-cache-experimental)
- [Consistent hashing](#consistent-hashing)
- [Metadata](#metadata)
- [Systemd socket activation](#systemd-socket-activation)
@@ -70,6 +77,8 @@ You don't need to read this document unless you want to use the full-featured st
- [Opentracing support](#opentracing-support)
- [Running BuildKit without root privileges](#running-buildkit-without-root-privileges)
- [Building multi-platform images](#building-multi-platform-images)
+ - [Configuring `buildctl`](#configuring-buildctl)
+ - [Color Output Controls](#color-output-controls)
- [Contributing](#contributing)
@@ -93,6 +102,8 @@ BuildKit is used by the following projects:
- [Earthly earthfiles](https://github.com/vladaionescu/earthly)
- [Gitpod](https://github.com/gitpod-io/gitpod)
- [Dagger](https://dagger.io)
+- [envd](https://github.com/tensorchord/envd/)
+- [Depot](https://depot.dev)
## Quick start
@@ -114,7 +125,9 @@ $ brew install buildkit
To build BuildKit from source, see [`.github/CONTRIBUTING.md`](./.github/CONTRIBUTING.md).
-### Starting the `buildkitd` daemon:
+For a `buildctl` reference, see [this document](./docs/buildctl.md).
+
+### Starting the `buildkitd` daemon
You need to run `buildkitd` as the root user on the host.
@@ -130,7 +143,7 @@ By default, the OCI (runc) worker is used. You can set `--oci-worker=false --con
We are open to adding more backends.
-To start the buildkitd daemon using systemd socket activiation, you can install the buildkit systemd unit files.
+To start the buildkitd daemon using systemd socket activation, you can install the buildkit systemd unit files.
See [Systemd socket activation](#systemd-socket-activation)
The buildkitd daemon listens gRPC API on `/run/buildkit/buildkitd.sock` by default, but you can also use TCP sockets.
@@ -157,7 +170,10 @@ Currently, the following high-level languages has been implemented for LLB:
- [HLB](https://github.com/openllb/hlb)
- [Earthfile (Earthly)](https://github.com/earthly/earthly)
- [Cargo Wharf (Rust)](https://github.com/denzp/cargo-wharf)
-- [Nix](https://github.com/AkihiroSuda/buildkit-nix)
+- [Nix](https://github.com/reproducible-containers/buildkit-nix)
+- [mopy (Python)](https://github.com/cmdjulian/mopy)
+- [envd (starlark)](https://github.com/tensorchord/envd/)
+- [Blubber](https://gitlab.wikimedia.org/repos/releng/blubber)
- (open a PR to add your own language)
### Exploring Dockerfiles
@@ -184,7 +200,9 @@ buildctl build \
`--local` exposes local source files from client to the builder. `context` and `dockerfile` are the names Dockerfile frontend looks for build context and Dockerfile location.
-#### Building a Dockerfile using external frontend:
+If the Dockerfile has a different filename, it can be specified with `--opt filename=./Dockerfile-alternative`.
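+
+For instance, a minimal sketch of the full command with an alternative Dockerfile name (same frontend and local directories as the example above):
+
+```bash
+buildctl build \
+    --frontend=dockerfile.v0 \
+    --local context=. \
+    --local dockerfile=. \
+    --opt filename=./Dockerfile-alternative
+```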
+
+#### Building a Dockerfile using external frontend
External versions of the Dockerfile frontend are pushed to https://hub.docker.com/r/docker/dockerfile-upstream and https://hub.docker.com/r/docker/dockerfile and can be used with the gateway frontend. The source for the external frontend is currently located in `./frontend/dockerfile/cmd/dockerfile-frontend` but will move out of this repository in the future ([#163](https://github.com/moby/buildkit/issues/163)). For automatic build from master branch of this repository `docker/dockerfile-upstream:master` or `docker/dockerfile-upstream:master-labs` image can be used.
@@ -201,10 +219,6 @@ buildctl build \
--opt build-arg:APT_MIRROR=cdn-fastly.deb.debian.org
```
-#### Building a Dockerfile with experimental features like `RUN --mount=type=(bind|cache|tmpfs|secret|ssh)`
-
-See [`frontend/dockerfile/docs/experimental.md`](frontend/dockerfile/docs/experimental.md).
-
### Output
By default, the build result and intermediate cache will only remain internally in BuildKit. An output needs to be specified to retrieve the result.
@@ -215,7 +229,13 @@ By default, the build result and intermediate cache will only remain internally
buildctl build ... --output type=image,name=docker.io/username/image,push=true
```
-To export the cache embed with the image and pushing them to registry together, type `registry` is required to import the cache, you should specify `--export-cache type=inline` and `--import-cache type=registry,ref=...`. To export the cache to a local directy, you should specify `--export-cache type=local`.
+To export the image to multiple registries:
+
+```bash
+buildctl build ... --output type=image,\"name=docker.io/username/image,docker.io/username2/image2\",push=true
+```
+
+To export the cache embedded with the image and push them to the registry together, type `registry` is required to import the cache; you should specify `--export-cache type=inline` and `--import-cache type=registry,ref=...`. To export the cache to a local directory, you should specify `--export-cache type=local`.
Details in [Export cache](#export-cache).
```bash
@@ -226,23 +246,35 @@ buildctl build ...\
```
Keys supported by image output:
-* `name=[value]`: image name
+* `name=<image names>`: specify image name(s)
* `push=true`: push after creating the image
* `push-by-digest=true`: push unnamed image
* `registry.insecure=true`: push to insecure HTTP registry
* `oci-mediatypes=true`: use OCI mediatypes in configuration JSON instead of Docker's
* `unpack=true`: unpack image after creation (for use with containerd)
-* `dangling-name-prefix=[value]`: name image with `prefix@` , used for anonymous images
+* `dangling-name-prefix=<value>`: name image with `prefix@<digest>`, used for anonymous images
* `name-canonical=true`: add additional canonical name `name@`
-* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
-* `compression-level=[value]`: compression level for gzip, estargz (0-9) and zstd (0-22)
-* `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers).
-* `buildinfo=true`: inline build info in [image config](docs/build-repro.md#image-config) (default `true`).
-* `buildinfo-attrs=true`: inline build info attributes in [image config](docs/build-repro.md#image-config) (default `false`).
+* `compression=<uncompressed|gzip|estargz|zstd>`: choose compression type for layers newly created and cached, gzip is default value. estargz should be used with `oci-mediatypes=true`.
+* `compression-level=<value>`: compression level for gzip, estargz (0-9) and zstd (0-22)
+* `force-compression=true`: forcefully apply `compression` option to all layers (including already existing layers)
+* `buildinfo=true`: attach inline build info in [image config](docs/buildinfo.md#image-config) (default `true`)
+* `buildinfo-attrs=true`: attach inline build info attributes in [image config](docs/buildinfo.md#image-config) (default `false`)
+* `store=true`: store the result images to the worker's (e.g. containerd) image store and ensure that the image has all blobs in the content store (default `true`). Ignored if the worker doesn't have an image store (e.g. OCI worker).
+* `annotation.<key>=<value>`: attach an annotation with the respective `key` and `value` to the built image
+ * Using the extended syntaxes, `annotation-<type>.<key>=<value>`, `annotation[<platform>].<key>=<value>` and both combined with `annotation-<type>[<platform>].<key>=<value>`, allows configuring exactly where to attach the annotation.
+ * `<type>` specifies what object to attach to, and can be any of `manifest` (the default), `manifest-descriptor`, `index` and `index-descriptor`
+ * `<platform>` specifies which objects to attach to (by default, all), and is the same key passed into the `platform` opt, see [`docs/multi-platform.md`](docs/multi-platform.md).
+ * See [`docs/annotations.md`](docs/annotations.md) for more details.
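+
+For illustration, a sketch attaching an OCI annotation at export time (the image name and the annotation key/value are placeholders):
+
+```bash
+buildctl build ... \
+  --output type=image,name=docker.io/username/image,push=true,annotation.org.opencontainers.image.title=my-image
+```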
If credentials are required, `buildctl` will attempt to read Docker configuration file `$DOCKER_CONFIG/config.json`.
`$DOCKER_CONFIG` defaults to `~/.docker`.
+> **Warning**
+>
+> Build information, along with the `buildinfo` and `buildinfo-attrs` attributes, is
+> deprecated and will be removed in the next release. See the [Deprecated features page](./docs/deprecated.md)
+> for status and alternative recommendations about this feature.
+
#### Local directory
The local client will copy the files directly to the client. This is useful if BuildKit is being used for building something else than container images.
@@ -285,6 +317,7 @@ buildctl build ... --output type=docker,name=myimage | docker load
buildctl build ... --output type=oci,dest=path/to/output.tar
buildctl build ... --output type=oci > output.tar
```
+
#### containerd image store
The containerd worker needs to be used
@@ -296,7 +329,6 @@ ctr --namespace=buildkit images ls
To change the containerd namespace, you need to change `worker.containerd.namespace` in [`/etc/buildkit/buildkitd.toml`](./docs/buildkitd.toml.md).
-
## Cache
To show local build cache (`/var/lib/buildkit`):
@@ -356,17 +388,19 @@ buildctl build ... \
`--export-cache` options:
* `type=registry`
-* `mode=min` (default): only export layers for the resulting image
-* `mode=max`: export all the layers of all intermediate steps.
-* `ref=docker.io/user/image:tag`: reference
-* `oci-mediatypes=true|false`: whether to use OCI mediatypes in exported manifests. Since BuildKit `v0.8` defaults to true.
-* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz and zstd should be used with `oci-mediatypes=true`.
-* `compression-level=[value]`: compression level for gzip, estargz (0-9) and zstd (0-22)
-* `force-compression=true`: forcibly apply `compression` option to all layers.
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+ * `min`: only export layers for the resulting image
+ * `max`: export all the layers of all intermediate steps
+* `ref=<ref>`: specify repository reference to store cache, e.g. `docker.io/user/image:tag`
+* `oci-mediatypes=<true|false>`: whether to use OCI mediatypes in exported manifests (default: `true`, since BuildKit `v0.8`)
+* `compression=<uncompressed|gzip|estargz|zstd>`: choose compression type for layers newly created and cached, gzip is default value. estargz and zstd should be used with `oci-mediatypes=true`
+* `compression-level=<value>`: choose compression level for gzip, estargz (0-9) and zstd (0-22)
+* `force-compression=true`: forcibly apply `compression` option to all layers
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
`--import-cache` options:
* `type=registry`
-* `ref=docker.io/user/image:tag`: reference
+* `ref=<ref>`: specify repository reference to retrieve cache from, e.g. `docker.io/user/image:tag`
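+
+Putting these options together, a sketch that exports a `mode=max` cache to a registry reference and imports it back (the reference is a placeholder):
+
+```bash
+buildctl build ... \
+  --export-cache type=registry,ref=docker.io/username/image:buildcache,mode=max \
+  --import-cache type=registry,ref=docker.io/username/image:buildcache
+```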
#### Local directory
@@ -379,19 +413,22 @@ The directory layout conforms to OCI Image Spec v1.0.
`--export-cache` options:
* `type=local`
-* `mode=min` (default): only export layers for the resulting image
-* `mode=max`: export all the layers of all intermediate steps.
-* `dest=path/to/output-dir`: destination directory for cache exporter
-* `oci-mediatypes=true|false`: whether to use OCI mediatypes in exported manifests. Since BuildKit `v0.8` defaults to true.
-* `compression=[uncompressed,gzip,estargz,zstd]`: choose compression type for layers newly created and cached, gzip is default value. estargz and zstd should be used with `oci-mediatypes=true`.
-* `compression-level=[value]`: compression level for gzip, estargz (0-9) and zstd (0-22)
-* `force-compression=true`: forcibly apply `compression` option to all layers.
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+ * `min`: only export layers for the resulting image
+ * `max`: export all the layers of all intermediate steps
+* `dest=<path>`: destination directory for cache exporter
+* `tag=<tag>`: specify custom tag of image to write to local index (default: `latest`)
+* `oci-mediatypes=<true|false>`: whether to use OCI mediatypes in exported manifests (default `true`, since BuildKit `v0.8`)
+* `compression=<uncompressed|gzip|estargz|zstd>`: choose compression type for layers newly created and cached, gzip is default value. estargz and zstd should be used with `oci-mediatypes=true`.
+* `compression-level=<value>`: compression level for gzip, estargz (0-9) and zstd (0-22)
+* `force-compression=true`: forcibly apply `compression` option to all layers
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
`--import-cache` options:
* `type=local`
-* `src=path/to/input-dir`: source directory for cache importer
-* `digest=sha256:deadbeef`: digest of the manifest list to import.
-* `tag=customtag`: custom tag of image. Defaults "latest" tag digest in `index.json` is for digest, not for tag
+* `src=<path>`: source directory for cache importer
+* `tag=<tag>`: specify custom tag of image to read from local index (default: `latest`)
+* `digest=sha256:<sha256digest>`: specify explicit digest of the manifest list to import
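+
+A sketch combining the local cache options above (the paths are placeholders):
+
+```bash
+buildctl build ... \
+  --export-cache type=local,dest=path/to/output-dir,mode=max \
+  --import-cache type=local,src=path/to/input-dir
+```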
#### GitHub Actions cache (experimental)
@@ -402,31 +439,129 @@ buildctl build ... \
--import-cache type=gha
```
-Github Actions cache saves both cache metadata and layers to GitHub's Cache service. This cache currently has a [size limit of 10GB](https://docs.github.com/en/actions/advanced-guides/caching-dependencies-to-speed-up-workflows#usage-limits-and-eviction-policy) that is shared accross different caches in the repo. If you exceed this limit, GitHub will save your cache but will begin evicting caches until the total size is less than 10 GB. Recycling caches too often can result in slower runtimes overall.
+GitHub Actions cache saves both cache metadata and layers to GitHub's Cache service. This cache currently has a [size limit of 10GB](https://docs.github.com/en/actions/advanced-guides/caching-dependencies-to-speed-up-workflows#usage-limits-and-eviction-policy) that is shared across different caches in the repo. If you exceed this limit, GitHub will save your cache but will begin evicting caches until the total size is less than 10 GB. Recycling caches too often can result in slower runtimes overall.
Similarly to using [actions/cache](https://github.com/actions/cache), caches are [scoped by branch](https://docs.github.com/en/actions/advanced-guides/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache), with the default and target branches being available to every branch.
-Following attributes are required to authenticate against the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication):
+The following attributes are required to authenticate against the [GitHub Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication):
* `url`: Cache server URL (default `$ACTIONS_CACHE_URL`)
* `token`: Access token (default `$ACTIONS_RUNTIME_TOKEN`)
:information_source: This type of cache can be used with [Docker Build Push Action](https://github.com/docker/build-push-action)
-where `url` and `token` will be automatically set. To use this backend in a inline `run` step, you have to include [crazy-max/ghaction-github-runtime](https://github.com/crazy-max/ghaction-github-runtime)
+where `url` and `token` will be automatically set. To use this backend in an inline `run` step, you have to include [crazy-max/ghaction-github-runtime](https://github.com/crazy-max/ghaction-github-runtime)
in your workflow to expose the runtime.
`--export-cache` options:
* `type=gha`
-* `mode=min` (default): only export layers for the resulting image
-* `mode=max`: export all the layers of all intermediate steps.
-* `scope=buildkit`: which scope cache object belongs to (default `buildkit`)
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+ * `min`: only export layers for the resulting image
+ * `max`: export all the layers of all intermediate steps
+* `scope=<scope>`: which scope cache object belongs to (default `buildkit`)
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
`--import-cache` options:
* `type=gha`
-* `scope=buildkit`: which scope cache object belongs to (default `buildkit`)
+* `scope=<scope>`: which scope cache object belongs to (default `buildkit`)
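+
+A sketch using a non-default scope so that different images built in the same repository keep separate caches (the scope name is arbitrary):
+
+```bash
+buildctl build ... \
+  --export-cache type=gha,scope=buildkit-frontend,mode=max \
+  --import-cache type=gha,scope=buildkit-frontend
+```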
+
+#### S3 cache (experimental)
+
+```bash
+buildctl build ... \
+ --output type=image,name=docker.io/username/image,push=true \
+ --export-cache type=s3,region=eu-west-1,bucket=my_bucket,name=my_image \
+ --import-cache type=s3,region=eu-west-1,bucket=my_bucket,name=my_image
+```
+
+The following attributes are required:
+* `bucket`: AWS S3 bucket (default: `$AWS_BUCKET`)
+* `region`: AWS region (default: `$AWS_REGION`)
+
+Storage locations:
+* blobs: `s3://<bucket>/<prefix><blobs_prefix>/<sha256>`, default: `s3://<bucket>/blobs/<sha256>`
+* manifests: `s3://<bucket>/<prefix><manifests_prefix>/<name>`, default: `s3://<bucket>/manifests/<name>`
+
+S3 configuration:
+* `blobs_prefix`: global prefix to store / read blobs on s3 (default: `blobs/`)
+* `manifests_prefix`: global prefix to store / read manifests on s3 (default: `manifests/`)
+* `endpoint_url`: specify a specific S3 endpoint (default: empty)
+* `use_path_style`: if set to `true`, put the bucket name in the URL instead of in the hostname (default: `false`)
+
+AWS Authentication:
+
+The simplest way is to use an IAM Instance profile.
+Other options are:
+
+* Any system using environment variables / config files supported by the [AWS Go SDK](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html). The configuration must be available for the buildkit daemon, not for the client.
+* Using the following attributes:
+ * `access_key_id`: Access Key ID
+ * `secret_access_key`: Secret Access Key
+ * `session_token`: Session Token
+
+`--export-cache` options:
+* `type=s3`
+* `mode=<min|max>`: specify cache layers to export (default: `min`)
+ * `min`: only export layers for the resulting image
+ * `max`: export all the layers of all intermediate steps
+* `prefix=<prefix>`: set global prefix to store / read files on s3 (default: empty)
+* `name=<manifest>`: specify name of the manifest to use (default `buildkit`)
+ * Multiple manifest names can be specified at the same time, separated by `;`. The standard use case is to use the git sha1 as name, and the branch name as duplicate, and load both with 2 `import-cache` commands, as in the sketch at the end of this section.
+* `ignore-error=<false|true>`: specify if error is ignored in case cache export fails (default: `false`)
+
+`--import-cache` options:
+* `type=s3`
+* `prefix=<prefix>`: set global prefix to store / read files on s3 (default: empty)
+* `blobs_prefix=<prefix>`: set global prefix to store / read blobs on s3 (default: `blobs/`)
+* `manifests_prefix=<prefix>`: set global prefix to store / read manifests on s3 (default: `manifests/`)
+* `name=<manifest>`: name of the manifest to use (default `buildkit`)
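+
+As referenced in the export options above, a sketch of the git-sha1-plus-branch naming pattern (the bucket, region and shell variables are placeholders):
+
+```bash
+buildctl build ... \
+  --export-cache "type=s3,region=eu-west-1,bucket=my_bucket,name=${GIT_SHA};${GIT_BRANCH},mode=max" \
+  --import-cache "type=s3,region=eu-west-1,bucket=my_bucket,name=${GIT_SHA}" \
+  --import-cache "type=s3,region=eu-west-1,bucket=my_bucket,name=${GIT_BRANCH}"
+```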
+
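+As a sketch of the multiple-manifest-name pattern described above (the sha1 and branch values are placeholders):
+
+```bash
+buildctl build ... \
+  --export-cache 'type=s3,region=eu-west-1,bucket=my_bucket,name=2c3b3b3;main' \
+  --import-cache type=s3,region=eu-west-1,bucket=my_bucket,name=2c3b3b3 \
+  --import-cache type=s3,region=eu-west-1,bucket=my_bucket,name=main
+```
+
+The export argument is quoted because `;` would otherwise be interpreted by the shell.
+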
+#### Azure Blob Storage cache (experimental)
+
+```bash
+buildctl build ... \
+ --output type=image,name=docker.io/username/image,push=true \
+ --export-cache type=azblob,account_url=https://myaccount.blob.core.windows.net,name=my_image \
+ --import-cache type=azblob,account_url=https://myaccount.blob.core.windows.net,name=my_image
+```
+
+The following attributes are required:
+* `account_url`: The Azure Blob Storage account URL (default: `$BUILDKIT_AZURE_STORAGE_ACCOUNT_URL`)
+
+Storage locations:
+* blobs: `<account-url>/<container>/<prefix><blobs_prefix>/<sha256>`, default: `<account-url>/<container>/blobs/<sha256>`
+* manifests: `<account-url>/<container>/<prefix><manifests_prefix>/<name>`, default: `<account-url>/<container>/manifests/<name>`
+
+Azure Blob Storage configuration:
+* `container`: The Azure Blob Storage container name (default: `buildkit-cache` or `$BUILDKIT_AZURE_STORAGE_CONTAINER` if set)
+* `blobs_prefix`: Global prefix to store / read blobs on the Azure Blob Storage container (`<container>`) (default: `blobs/`)
+* `manifests_prefix`: Global prefix to store / read manifests on the Azure Blob Storage container (`<container>`) (default: `manifests/`)
+
+Azure Blob Storage authentication:
+
+There are 2 options supported for Azure Blob Storage authentication:
+
+* Any system using environment variables supported by the [Azure SDK for Go](https://docs.microsoft.com/en-us/azure/developer/go/azure-sdk-authentication). The configuration must be available for the buildkit daemon, not for the client.
+* Secret Access Key, using the `secret_access_key` attribute to specify the primary or secondary account key for your Azure Blob Storage account ([Azure Blob Storage account keys](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage)).
+
+`--export-cache` options:
+* `type=azblob`
+* `mode=`: specify cache layers to export (default: `min`)
+ * `min`: only export layers for the resulting image
+ * `max`: export all the layers of all intermediate steps
+* `prefix=`: set global prefix to store / read files on the Azure Blob Storage container (`<container>`) (default: empty)
+* `name=`: specify name of the manifest to use (default: `buildkit`)
+  * Multiple manifest names can be specified at the same time, separated by `;`. The standard use case is to use the git sha1 as the name and the branch name as a duplicate, and to load both with two `import-cache` commands.
+* `ignore-error=`: specify if error is ignored in case cache export fails (default: `false`)
+
+`--import-cache` options:
+* `type=azblob`
+* `prefix=`: set global prefix to store / read files on the Azure Blob Storage container (`<container>`) (default: empty)
+* `blobs_prefix=`: set global prefix to store / read blobs on the Azure Blob Storage container (`<container>`) (default: `blobs/`)
+* `manifests_prefix=`: set global prefix to store / read manifests on the Azure Blob Storage container (`<container>`) (default: `manifests/`)
+* `name=`: name of the manifest to use (default: `buildkit`)
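+
+As a sketch (the account key is a placeholder; environment-based authentication on the daemon works without it):
+
+```bash
+buildctl build ... \
+  --output type=image,name=docker.io/username/image,push=true \
+  --export-cache type=azblob,account_url=https://myaccount.blob.core.windows.net,secret_access_key=<account-key>,name=my_image \
+  --import-cache type=azblob,account_url=https://myaccount.blob.core.windows.net,secret_access_key=<account-key>,name=my_image
+```
+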
### Consistent hashing
-If you have multiple BuildKit daemon instances but you don't want to use registry for sharing cache across the cluster,
+If you have multiple BuildKit daemon instances, but you don't want to use a registry for sharing cache across the cluster,
consider client-side load balancing using consistent hashing.
See [`./examples/kubernetes/consistenthash`](./examples/kubernetes/consistenthash).
@@ -446,26 +581,6 @@ jq '.' metadata.json
```
```json
{
- "containerimage.buildinfo": {
- "frontend": "dockerfile.v0",
- "attrs": {
- "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
- "filename": "Dockerfile",
- "source": "docker/dockerfile:master"
- },
- "sources": [
- {
- "type": "docker-image",
- "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
- "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
- },
- {
- "type": "docker-image",
- "ref": "docker.io/library/alpine:3.13",
- "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
- }
- ]
- },
"containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
"containerimage.descriptor": {
"annotations": {
@@ -512,7 +627,7 @@ buildctl \
`buildctl build` can be called against a randomly load-balanced `buildkitd` daemon.
-See also [Consistent hashing](#consistenthashing) for client-side load balancing.
+See also [Consistent hashing](#consistent-hashing) for client-side load balancing.
## Containerizing BuildKit
@@ -603,7 +718,16 @@ Please refer to [`docs/rootless.md`](docs/rootless.md).
Please refer to [`docs/multi-platform.md`](docs/multi-platform.md).
+### Configuring `buildctl`
+
+#### Color Output Controls
+
+`buildctl` supports modifying the colors used for terminal output. Set the environment variable `BUILDKIT_COLORS` to something like `run=green:warning=yellow:error=red:cancel=255,165,0` to choose the colors you would like to use. Setting `NO_COLOR` to anything will disable colorized output, as recommended by [no-color.org](https://no-color.org/).
+
+Parsing errors will be reported but ignored. This will result in default color values being used where needed.
+
+- [The list of pre-defined colors](https://github.com/moby/buildkit/blob/master/util/progress/progressui/colors.go).
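+
+For example (a sketch reusing the values above):
+
+```bash
+# use custom colors for this build
+BUILDKIT_COLORS="run=green:warning=yellow:error=red:cancel=255,165,0" buildctl build ...
+
+# disable colorized output entirely
+NO_COLOR=1 buildctl build ...
+```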
+
## Contributing
Want to contribute to BuildKit? Awesome! You can find information about contributing to this project in the [CONTRIBUTING.md](/.github/CONTRIBUTING.md)
-
diff --git a/api/services/control/control.pb.go b/api/services/control/control.pb.go
index 939f2c2ca7d8..2567a0d9700a 100644
--- a/api/services/control/control.pb.go
+++ b/api/services/control/control.pb.go
@@ -6,12 +6,14 @@ package moby_buildkit_v1
import (
context "context"
fmt "fmt"
+ rpc "github.com/gogo/googleapis/google/rpc"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
_ "github.com/golang/protobuf/ptypes/timestamp"
types "github.com/moby/buildkit/api/types"
pb "github.com/moby/buildkit/solver/pb"
+ pb1 "github.com/moby/buildkit/sourcepolicy/pb"
github_com_moby_buildkit_util_entitlements "github.com/moby/buildkit/util/entitlements"
github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
grpc "google.golang.org/grpc"
@@ -35,6 +37,34 @@ var _ = time.Kitchen
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+type BuildHistoryEventType int32
+
+const (
+ BuildHistoryEventType_STARTED BuildHistoryEventType = 0
+ BuildHistoryEventType_COMPLETE BuildHistoryEventType = 1
+ BuildHistoryEventType_DELETED BuildHistoryEventType = 2
+)
+
+var BuildHistoryEventType_name = map[int32]string{
+ 0: "STARTED",
+ 1: "COMPLETE",
+ 2: "DELETED",
+}
+
+var BuildHistoryEventType_value = map[string]int32{
+ "STARTED": 0,
+ "COMPLETE": 1,
+ "DELETED": 2,
+}
+
+func (x BuildHistoryEventType) String() string {
+ return proto.EnumName(BuildHistoryEventType_name, int32(x))
+}
+
+func (BuildHistoryEventType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{0}
+}
+
type PruneRequest struct {
Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"`
All bool `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"`
@@ -347,6 +377,8 @@ type SolveRequest struct {
Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"`
Entitlements []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"`
FrontendInputs map[string]*pb.Definition `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Internal bool `protobuf:"varint,11,opt,name=Internal,proto3" json:"Internal,omitempty"`
+ SourcePolicy *pb1.Policy `protobuf:"bytes,12,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -448,6 +480,20 @@ func (m *SolveRequest) GetFrontendInputs() map[string]*pb.Definition {
return nil
}
+func (m *SolveRequest) GetInternal() bool {
+ if m != nil {
+ return m.Internal
+ }
+ return false
+}
+
+func (m *SolveRequest) GetSourcePolicy() *pb1.Policy {
+ if m != nil {
+ return m.SourcePolicy
+ }
+ return nil
+}
+
type CacheOptions struct {
	// ExportRefDeprecated is deprecated in favor of the new Exports since BuildKit v0.4.0.
// When ExportRefDeprecated is set, the solver appends
@@ -1240,978 +1286,1334 @@ func (m *ListWorkersResponse) GetRecord() []*types.WorkerRecord {
return nil
}
-func init() {
- proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest")
- proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest")
- proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse")
- proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord")
- proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest")
- proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry")
- proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry")
- proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry")
- proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions")
- proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptions.ExportAttrsDeprecatedEntry")
- proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.CacheOptionsEntry")
- proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptionsEntry.AttrsEntry")
- proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse")
- proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveResponse.ExporterResponseEntry")
- proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest")
- proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse")
- proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex")
- proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus")
- proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog")
- proto.RegisterType((*VertexWarning)(nil), "moby.buildkit.v1.VertexWarning")
- proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage")
- proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest")
- proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse")
+type InfoRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) }
-
-var fileDescriptor_0c5120591600887d = []byte{
- // 1543 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcd, 0x6f, 0x1b, 0x45,
- 0x14, 0xef, 0xda, 0xf1, 0xd7, 0x8b, 0x13, 0xa5, 0xd3, 0x52, 0xad, 0x16, 0x91, 0xa4, 0xdb, 0x22,
- 0x45, 0x55, 0xbb, 0x4e, 0x03, 0x85, 0x12, 0x3e, 0xd4, 0x3a, 0x2e, 0x34, 0x55, 0x23, 0xca, 0xa4,
- 0xa5, 0x52, 0x0f, 0x48, 0x6b, 0x7b, 0xbc, 0x59, 0x65, 0xbd, 0xb3, 0xcc, 0xcc, 0xa6, 0x35, 0x7f,
- 0x00, 0x67, 0x6e, 0xfc, 0x01, 0x1c, 0x38, 0x71, 0xe6, 0x2f, 0x40, 0xea, 0x91, 0x73, 0x0f, 0x01,
- 0xf5, 0x0e, 0xe2, 0xc8, 0x11, 0xcd, 0xc7, 0x3a, 0xeb, 0xd8, 0xce, 0x57, 0x39, 0x79, 0xde, 0xcc,
- 0x7b, 0xbf, 0x7d, 0x9f, 0x33, 0xef, 0x19, 0xe6, 0x3a, 0x34, 0x16, 0x8c, 0x46, 0x5e, 0xc2, 0xa8,
- 0xa0, 0x68, 0xa1, 0x4f, 0xdb, 0x03, 0xaf, 0x9d, 0x86, 0x51, 0x77, 0x37, 0x14, 0xde, 0xde, 0x4d,
- 0xe7, 0x46, 0x10, 0x8a, 0x9d, 0xb4, 0xed, 0x75, 0x68, 0xbf, 0x11, 0xd0, 0x80, 0x36, 0x14, 0x63,
- 0x3b, 0xed, 0x29, 0x4a, 0x11, 0x6a, 0xa5, 0x01, 0x9c, 0xa5, 0x80, 0xd2, 0x20, 0x22, 0x07, 0x5c,
- 0x22, 0xec, 0x13, 0x2e, 0xfc, 0x7e, 0x62, 0x18, 0xae, 0xe7, 0xf0, 0xe4, 0xc7, 0x1a, 0xd9, 0xc7,
- 0x1a, 0x9c, 0x46, 0x7b, 0x84, 0x35, 0x92, 0x76, 0x83, 0x26, 0xdc, 0x70, 0x37, 0xa6, 0x72, 0xfb,
- 0x49, 0xd8, 0x10, 0x83, 0x84, 0xf0, 0xc6, 0x73, 0xca, 0x76, 0x09, 0xd3, 0x02, 0xee, 0xf7, 0x16,
- 0xd4, 0x1f, 0xb1, 0x34, 0x26, 0x98, 0x7c, 0x9b, 0x12, 0x2e, 0xd0, 0x25, 0x28, 0xf7, 0xc2, 0x48,
- 0x10, 0x66, 0x5b, 0xcb, 0xc5, 0x95, 0x1a, 0x36, 0x14, 0x5a, 0x80, 0xa2, 0x1f, 0x45, 0x76, 0x61,
- 0xd9, 0x5a, 0xa9, 0x62, 0xb9, 0x44, 0x2b, 0x50, 0xdf, 0x25, 0x24, 0x69, 0xa5, 0xcc, 0x17, 0x21,
- 0x8d, 0xed, 0xe2, 0xb2, 0xb5, 0x52, 0x6c, 0xce, 0xbc, 0xdc, 0x5f, 0xb2, 0xf0, 0xc8, 0x09, 0x72,
- 0xa1, 0x26, 0xe9, 0xe6, 0x40, 0x10, 0x6e, 0xcf, 0xe4, 0xd8, 0x0e, 0xb6, 0xdd, 0x6b, 0xb0, 0xd0,
- 0x0a, 0xf9, 0xee, 0x13, 0xee, 0x07, 0xc7, 0xe9, 0xe2, 0x3e, 0x80, 0xf3, 0x39, 0x5e, 0x9e, 0xd0,
- 0x98, 0x13, 0x74, 0x0b, 0xca, 0x8c, 0x74, 0x28, 0xeb, 0x2a, 0xe6, 0xd9, 0xb5, 0x77, 0xbc, 0xc3,
- 0xb1, 0xf1, 0x8c, 0x80, 0x64, 0xc2, 0x86, 0xd9, 0xfd, 0xb1, 0x08, 0xb3, 0xb9, 0x7d, 0x34, 0x0f,
- 0x85, 0xcd, 0x96, 0x6d, 0x2d, 0x5b, 0x2b, 0x35, 0x5c, 0xd8, 0x6c, 0x21, 0x1b, 0x2a, 0x5b, 0xa9,
- 0xf0, 0xdb, 0x11, 0x31, 0xb6, 0x67, 0x24, 0xba, 0x08, 0xa5, 0xcd, 0xf8, 0x09, 0x27, 0xca, 0xf0,
- 0x2a, 0xd6, 0x04, 0x42, 0x30, 0xb3, 0x1d, 0x7e, 0x47, 0xb4, 0x99, 0x58, 0xad, 0x91, 0x03, 0xe5,
- 0x47, 0x3e, 0x23, 0xb1, 0xb0, 0x4b, 0x12, 0xb7, 0x59, 0xb0, 0x2d, 0x6c, 0x76, 0x50, 0x13, 0x6a,
- 0x1b, 0x8c, 0xf8, 0x82, 0x74, 0xef, 0x0a, 0xbb, 0xbc, 0x6c, 0xad, 0xcc, 0xae, 0x39, 0x9e, 0x4e,
- 0x0a, 0x2f, 0x4b, 0x0a, 0xef, 0x71, 0x96, 0x14, 0xcd, 0xea, 0xcb, 0xfd, 0xa5, 0x73, 0x3f, 0xfc,
- 0x21, 0x7d, 0x37, 0x14, 0x43, 0x77, 0x00, 0x1e, 0xfa, 0x5c, 0x3c, 0xe1, 0x0a, 0xa4, 0x72, 0x2c,
- 0xc8, 0x8c, 0x02, 0xc8, 0xc9, 0xa0, 0x45, 0x00, 0xe5, 0x84, 0x0d, 0x9a, 0xc6, 0xc2, 0xae, 0x2a,
- 0xdd, 0x73, 0x3b, 0x68, 0x19, 0x66, 0x5b, 0x84, 0x77, 0x58, 0x98, 0xa8, 0x50, 0xd7, 0x94, 0x7b,
- 0xf2, 0x5b, 0x12, 0x41, 0x7b, 0xf0, 0xf1, 0x20, 0x21, 0x36, 0x28, 0x86, 0xdc, 0x8e, 0x8c, 0xe5,
- 0xf6, 0x8e, 0xcf, 0x48, 0xd7, 0x9e, 0x55, 0xee, 0x32, 0x94, 0xf4, 0xaf, 0xf6, 0x04, 0xb7, 0xeb,
- 0x2a, 0xc8, 0x19, 0xe9, 0xfe, 0x54, 0x86, 0xfa, 0xb6, 0xcc, 0xf1, 0x2c, 0x1d, 0x16, 0xa0, 0x88,
- 0x49, 0xcf, 0xc4, 0x46, 0x2e, 0x91, 0x07, 0xd0, 0x22, 0xbd, 0x30, 0x0e, 0x95, 0x56, 0x05, 0x65,
- 0xf8, 0xbc, 0x97, 0xb4, 0xbd, 0x83, 0x5d, 0x9c, 0xe3, 0x40, 0x0e, 0x54, 0xef, 0xbd, 0x48, 0x28,
- 0x93, 0x29, 0x55, 0x54, 0x30, 0x43, 0x1a, 0x3d, 0x85, 0xb9, 0x6c, 0x7d, 0x57, 0x08, 0x26, 0x13,
- 0x55, 0xa6, 0xd1, 0xcd, 0xf1, 0x34, 0xca, 0x2b, 0xe5, 0x8d, 0xc8, 0xdc, 0x8b, 0x05, 0x1b, 0xe0,
- 0x51, 0x1c, 0x69, 0xe1, 0x36, 0xe1, 0x5c, 0x6a, 0xa8, 0xc2, 0x8f, 0x33, 0x52, 0xaa, 0xf3, 0x39,
- 0xa3, 0xb1, 0x20, 0x71, 0x57, 0x85, 0xbe, 0x86, 0x87, 0xb4, 0x54, 0x27, 0x5b, 0x6b, 0x75, 0x2a,
- 0x27, 0x52, 0x67, 0x44, 0xc6, 0xa8, 0x33, 0xb2, 0x87, 0xd6, 0xa1, 0xb4, 0xe1, 0x77, 0x76, 0x88,
- 0x8a, 0xf2, 0xec, 0xda, 0xe2, 0x38, 0xa0, 0x3a, 0xfe, 0x52, 0x85, 0x95, 0xab, 0x42, 0x3d, 0x87,
- 0xb5, 0x08, 0xfa, 0x06, 0xea, 0xf7, 0x62, 0x11, 0x8a, 0x88, 0xf4, 0x55, 0xc4, 0x6a, 0x32, 0x62,
- 0xcd, 0xf5, 0x57, 0xfb, 0x4b, 0x1f, 0x4c, 0xbd, 0x78, 0x52, 0x11, 0x46, 0x0d, 0x92, 0x93, 0xf2,
- 0x72, 0x10, 0x78, 0x04, 0x0f, 0x3d, 0x83, 0xf9, 0x4c, 0xd9, 0xcd, 0x38, 0x49, 0x05, 0xb7, 0x41,
- 0x59, 0xbd, 0x76, 0x42, 0xab, 0xb5, 0x90, 0x36, 0xfb, 0x10, 0x92, 0x73, 0x07, 0xd0, 0x78, 0xac,
- 0x64, 0x4e, 0xed, 0x92, 0x41, 0x96, 0x53, 0xbb, 0x64, 0x20, 0xcb, 0x7a, 0xcf, 0x8f, 0x52, 0x5d,
- 0xee, 0x35, 0xac, 0x89, 0xf5, 0xc2, 0x6d, 0x4b, 0x22, 0x8c, 0xbb, 0xf7, 0x54, 0x08, 0x5f, 0xc1,
- 0x85, 0x09, 0xaa, 0x4e, 0x80, 0xb8, 0x9a, 0x87, 0x18, 0xcf, 0xe9, 0x03, 0x48, 0xf7, 0x97, 0x22,
- 0xd4, 0xf3, 0x01, 0x43, 0xab, 0x70, 0x41, 0xdb, 0x89, 0x49, 0xaf, 0x45, 0x12, 0x46, 0x3a, 0xf2,
- 0x96, 0x30, 0xe0, 0x93, 0x8e, 0xd0, 0x1a, 0x5c, 0xdc, 0xec, 0x9b, 0x6d, 0x9e, 0x13, 0x29, 0xa8,
- 0x7a, 0x9c, 0x78, 0x86, 0x28, 0xbc, 0xa5, 0xa1, 0x94, 0x27, 0x72, 0x42, 0x45, 0x15, 0xb0, 0x8f,
- 0x8e, 0xce, 0x2a, 0x6f, 0xa2, 0xac, 0x8e, 0xdb, 0x64, 0x5c, 0xf4, 0x29, 0x54, 0xf4, 0x41, 0x56,
- 0x98, 0x57, 0x8e, 0xfe, 0x84, 0x06, 0xcb, 0x64, 0xa4, 0xb8, 0xb6, 0x83, 0xdb, 0xa5, 0x53, 0x88,
- 0x1b, 0x19, 0xe7, 0x3e, 0x38, 0xd3, 0x55, 0x3e, 0x4d, 0x0a, 0xb8, 0x3f, 0x5b, 0x70, 0x7e, 0xec,
- 0x43, 0xf2, 0xd5, 0x50, 0xf7, 0xa6, 0x86, 0x50, 0x6b, 0xd4, 0x82, 0x92, 0xae, 0xfc, 0x82, 0x52,
- 0xd8, 0x3b, 0x81, 0xc2, 0x5e, 0xae, 0xec, 0xb5, 0xb0, 0x73, 0x1b, 0xe0, 0x6c, 0xc9, 0xea, 0xfe,
- 0x6a, 0xc1, 0x9c, 0xa9, 0x32, 0xf3, 0xc4, 0xfa, 0xb0, 0x90, 0x95, 0x50, 0xb6, 0x67, 0x1e, 0xdb,
- 0x5b, 0x53, 0x0b, 0x54, 0xb3, 0x79, 0x87, 0xe5, 0xb4, 0x8e, 0x63, 0x70, 0xce, 0x46, 0x96, 0x57,
- 0x87, 0x58, 0x4f, 0xa5, 0xf9, 0x65, 0x98, 0xdb, 0x16, 0xbe, 0x48, 0xf9, 0xd4, 0x97, 0xc3, 0xfd,
- 0xc7, 0x82, 0xf9, 0x8c, 0xc7, 0x58, 0xf7, 0x3e, 0x54, 0xf7, 0x08, 0x13, 0xe4, 0x05, 0xe1, 0xc6,
- 0x2a, 0x7b, 0xdc, 0xaa, 0xaf, 0x15, 0x07, 0x1e, 0x72, 0xa2, 0x75, 0xa8, 0x72, 0x85, 0x43, 0xb2,
- 0x40, 0x2d, 0x4e, 0x93, 0x32, 0xdf, 0x1b, 0xf2, 0xa3, 0x06, 0xcc, 0x44, 0x34, 0xe0, 0xa6, 0x66,
- 0xde, 0x9e, 0x26, 0xf7, 0x90, 0x06, 0x58, 0x31, 0xa2, 0x8f, 0xa1, 0xfa, 0xdc, 0x67, 0x71, 0x18,
- 0x07, 0x59, 0x15, 0x2c, 0x4d, 0x13, 0x7a, 0xaa, 0xf9, 0xf0, 0x50, 0x40, 0x76, 0x3a, 0x65, 0x7d,
- 0x86, 0x1e, 0x40, 0xb9, 0x1b, 0x06, 0x84, 0x0b, 0xed, 0x92, 0xe6, 0x9a, 0xbc, 0xe4, 0x5f, 0xed,
- 0x2f, 0x5d, 0xcb, 0xdd, 0xe2, 0x34, 0x21, 0xb1, 0x6c, 0x76, 0xfd, 0x30, 0x26, 0x8c, 0x37, 0x02,
- 0x7a, 0x43, 0x8b, 0x78, 0x2d, 0xf5, 0x83, 0x0d, 0x82, 0xc4, 0x0a, 0xf5, 0x5d, 0xad, 0xee, 0x8b,
- 0xb3, 0x61, 0x69, 0x04, 0x59, 0x06, 0xb1, 0xdf, 0x27, 0xe6, 0x6d, 0x56, 0x6b, 0xd9, 0x38, 0x74,
- 0x64, 0x9e, 0x77, 0x55, 0x4b, 0x55, 0xc5, 0x86, 0x42, 0xeb, 0x50, 0xe1, 0xc2, 0x67, 0xf2, 0xce,
- 0x29, 0x9d, 0xb0, 0xe3, 0xc9, 0x04, 0xd0, 0x67, 0x50, 0xeb, 0xd0, 0x7e, 0x12, 0x11, 0x29, 0x5d,
- 0x3e, 0xa1, 0xf4, 0x81, 0x88, 0x4c, 0x3d, 0xc2, 0x18, 0x65, 0xaa, 0xd7, 0xaa, 0x61, 0x4d, 0xa0,
- 0x0f, 0x61, 0x2e, 0x61, 0x34, 0x60, 0x84, 0xf3, 0x2f, 0x18, 0x4d, 0x13, 0xf3, 0xc2, 0x9e, 0x97,
- 0x97, 0xf7, 0xa3, 0xfc, 0x01, 0x1e, 0xe5, 0x73, 0xff, 0x2e, 0x40, 0x3d, 0x9f, 0x22, 0x63, 0x4d,
- 0xe8, 0x03, 0x28, 0xeb, 0x84, 0xd3, 0xb9, 0x7e, 0x36, 0x1f, 0x6b, 0x84, 0x89, 0x3e, 0xb6, 0xa1,
- 0xd2, 0x49, 0x99, 0xea, 0x50, 0x75, 0xdf, 0x9a, 0x91, 0xd2, 0x52, 0x41, 0x85, 0x1f, 0x29, 0x1f,
- 0x17, 0xb1, 0x26, 0x64, 0xd3, 0x3a, 0x9c, 0x53, 0x4e, 0xd7, 0xb4, 0x0e, 0xc5, 0xf2, 0xf1, 0xab,
- 0xbc, 0x51, 0xfc, 0xaa, 0xa7, 0x8e, 0x9f, 0xfb, 0x9b, 0x05, 0xb5, 0x61, 0x6d, 0xe5, 0xbc, 0x6b,
- 0xbd, 0xb1, 0x77, 0x47, 0x3c, 0x53, 0x38, 0x9b, 0x67, 0x2e, 0x41, 0x99, 0x0b, 0x46, 0xfc, 0xbe,
- 0x1e, 0xa9, 0xb0, 0xa1, 0xe4, 0x2d, 0xd6, 0xe7, 0x81, 0x8a, 0x50, 0x1d, 0xcb, 0xa5, 0xfb, 0xaf,
- 0x05, 0x73, 0x23, 0xe5, 0xfe, 0xbf, 0xda, 0x72, 0x11, 0x4a, 0x11, 0xd9, 0x23, 0x7a, 0xe8, 0x2b,
- 0x62, 0x4d, 0xc8, 0x5d, 0xbe, 0x43, 0x99, 0x50, 0xca, 0xd5, 0xb1, 0x26, 0xa4, 0xce, 0x5d, 0x22,
- 0xfc, 0x30, 0x52, 0xf7, 0x52, 0x1d, 0x1b, 0x4a, 0xea, 0x9c, 0xb2, 0xc8, 0x34, 0xbe, 0x72, 0x89,
- 0x5c, 0x98, 0x09, 0xe3, 0x1e, 0x35, 0x69, 0xa3, 0x3a, 0x9b, 0x6d, 0x9a, 0xb2, 0x0e, 0xd9, 0x8c,
- 0x7b, 0x14, 0xab, 0x33, 0x74, 0x19, 0xca, 0xcc, 0x8f, 0x03, 0x92, 0x75, 0xbd, 0x35, 0xc9, 0x85,
- 0xe5, 0x0e, 0x36, 0x07, 0xae, 0x0b, 0x75, 0x35, 0x38, 0x6e, 0x11, 0x2e, 0xc7, 0x14, 0x99, 0xd6,
- 0x5d, 0x5f, 0xf8, 0xca, 0xec, 0x3a, 0x56, 0x6b, 0xf7, 0x3a, 0xa0, 0x87, 0x21, 0x17, 0x4f, 0xd5,
- 0xc0, 0xcb, 0x8f, 0x9b, 0x2a, 0xb7, 0xe1, 0xc2, 0x08, 0xb7, 0x79, 0x16, 0x3e, 0x39, 0x34, 0x57,
- 0x5e, 0x1d, 0xbf, 0x71, 0xd5, 0x5c, 0xed, 0x69, 0xc1, 0xd1, 0xf1, 0x72, 0xed, 0xaf, 0x22, 0x54,
- 0x36, 0xf4, 0x5f, 0x06, 0xe8, 0x31, 0xd4, 0x86, 0x63, 0x2b, 0x72, 0xc7, 0x61, 0x0e, 0xcf, 0xbf,
- 0xce, 0x95, 0x23, 0x79, 0x8c, 0x7e, 0xf7, 0xa1, 0xa4, 0x06, 0x78, 0x34, 0xe1, 0xdd, 0xc9, 0x4f,
- 0xf6, 0xce, 0xd1, 0x03, 0xf1, 0xaa, 0x25, 0x91, 0xd4, 0xa3, 0x3d, 0x09, 0x29, 0xdf, 0x6e, 0x3b,
- 0x4b, 0xc7, 0xbc, 0xf6, 0x68, 0x0b, 0xca, 0xe6, 0x26, 0x9b, 0xc4, 0x9a, 0x7f, 0x9a, 0x9d, 0xe5,
- 0xe9, 0x0c, 0x1a, 0x6c, 0xd5, 0x42, 0x5b, 0xc3, 0x09, 0x6a, 0x92, 0x6a, 0xf9, 0x34, 0x70, 0x8e,
- 0x39, 0x5f, 0xb1, 0x56, 0x2d, 0xf4, 0x0c, 0x66, 0x73, 0x81, 0x46, 0x13, 0x02, 0x3a, 0x9e, 0x35,
- 0xce, 0xbb, 0xc7, 0x70, 0x69, 0x65, 0x9b, 0xf5, 0x97, 0xaf, 0x17, 0xad, 0xdf, 0x5f, 0x2f, 0x5a,
- 0x7f, 0xbe, 0x5e, 0xb4, 0xda, 0x65, 0x55, 0xf2, 0xef, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x54,
- 0x8e, 0x72, 0x11, 0x36, 0x12, 0x00, 0x00,
+func (m *InfoRequest) Reset() { *m = InfoRequest{} }
+func (m *InfoRequest) String() string { return proto.CompactTextString(m) }
+func (*InfoRequest) ProtoMessage() {}
+func (*InfoRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{17}
+}
+func (m *InfoRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *InfoRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InfoRequest.Merge(m, src)
+}
+func (m *InfoRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *InfoRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_InfoRequest.DiscardUnknown(m)
}
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
+var xxx_messageInfo_InfoRequest proto.InternalMessageInfo
-// ControlClient is the client API for Control service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ControlClient interface {
- DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error)
- Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error)
- Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error)
- Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error)
- Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error)
- ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error)
+type InfoResponse struct {
+ BuildkitVersion *types.BuildkitVersion `protobuf:"bytes,1,opt,name=buildkitVersion,proto3" json:"buildkitVersion,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-type controlClient struct {
- cc *grpc.ClientConn
+func (m *InfoResponse) Reset() { *m = InfoResponse{} }
+func (m *InfoResponse) String() string { return proto.CompactTextString(m) }
+func (*InfoResponse) ProtoMessage() {}
+func (*InfoResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{18}
}
-
-func NewControlClient(cc *grpc.ClientConn) ControlClient {
- return &controlClient{cc}
+func (m *InfoResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
}
-
-func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) {
- out := new(DiskUsageResponse)
- err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, opts...)
- if err != nil {
- return nil, err
+func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- return out, nil
}
-
-func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[0], "/moby.buildkit.v1.Control/Prune", opts...)
- if err != nil {
- return nil, err
- }
- x := &controlPruneClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
+func (m *InfoResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InfoResponse.Merge(m, src)
}
-
-type Control_PruneClient interface {
- Recv() (*UsageRecord, error)
- grpc.ClientStream
+func (m *InfoResponse) XXX_Size() int {
+ return m.Size()
}
-
-type controlPruneClient struct {
- grpc.ClientStream
+func (m *InfoResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_InfoResponse.DiscardUnknown(m)
}
-func (x *controlPruneClient) Recv() (*UsageRecord, error) {
- m := new(UsageRecord)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
+var xxx_messageInfo_InfoResponse proto.InternalMessageInfo
-func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) {
- out := new(SolveResponse)
- err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, opts...)
- if err != nil {
- return nil, err
+func (m *InfoResponse) GetBuildkitVersion() *types.BuildkitVersion {
+ if m != nil {
+ return m.BuildkitVersion
}
- return out, nil
+ return nil
}
-func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[1], "/moby.buildkit.v1.Control/Status", opts...)
- if err != nil {
- return nil, err
- }
- x := &controlStatusClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
+type BuildHistoryRequest struct {
+ ActiveOnly bool `protobuf:"varint,1,opt,name=ActiveOnly,proto3" json:"ActiveOnly,omitempty"`
+ Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"`
+ EarlyExit bool `protobuf:"varint,3,opt,name=EarlyExit,proto3" json:"EarlyExit,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-type Control_StatusClient interface {
- Recv() (*StatusResponse, error)
- grpc.ClientStream
+func (m *BuildHistoryRequest) Reset() { *m = BuildHistoryRequest{} }
+func (m *BuildHistoryRequest) String() string { return proto.CompactTextString(m) }
+func (*BuildHistoryRequest) ProtoMessage() {}
+func (*BuildHistoryRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{19}
}
-
-type controlStatusClient struct {
- grpc.ClientStream
+func (m *BuildHistoryRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
}
-
-func (x *controlStatusClient) Recv() (*StatusResponse, error) {
- m := new(StatusResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
+func (m *BuildHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BuildHistoryRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- return m, nil
}
-
-func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[2], "/moby.buildkit.v1.Control/Session", opts...)
- if err != nil {
- return nil, err
- }
- x := &controlSessionClient{stream}
- return x, nil
+func (m *BuildHistoryRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildHistoryRequest.Merge(m, src)
}
-
-type Control_SessionClient interface {
- Send(*BytesMessage) error
- Recv() (*BytesMessage, error)
- grpc.ClientStream
+func (m *BuildHistoryRequest) XXX_Size() int {
+ return m.Size()
}
-
-type controlSessionClient struct {
- grpc.ClientStream
+func (m *BuildHistoryRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildHistoryRequest.DiscardUnknown(m)
}
-func (x *controlSessionClient) Send(m *BytesMessage) error {
- return x.ClientStream.SendMsg(m)
-}
+var xxx_messageInfo_BuildHistoryRequest proto.InternalMessageInfo
-func (x *controlSessionClient) Recv() (*BytesMessage, error) {
- m := new(BytesMessage)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
+func (m *BuildHistoryRequest) GetActiveOnly() bool {
+ if m != nil {
+ return m.ActiveOnly
}
- return m, nil
+ return false
}
-func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) {
- out := new(ListWorkersResponse)
- err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, opts...)
- if err != nil {
- return nil, err
+func (m *BuildHistoryRequest) GetRef() string {
+ if m != nil {
+ return m.Ref
}
- return out, nil
+ return ""
}
-// ControlServer is the server API for Control service.
-type ControlServer interface {
- DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error)
- Prune(*PruneRequest, Control_PruneServer) error
- Solve(context.Context, *SolveRequest) (*SolveResponse, error)
- Status(*StatusRequest, Control_StatusServer) error
- Session(Control_SessionServer) error
- ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error)
+func (m *BuildHistoryRequest) GetEarlyExit() bool {
+ if m != nil {
+ return m.EarlyExit
+ }
+ return false
}
-// UnimplementedControlServer can be embedded to have forward compatible implementations.
-type UnimplementedControlServer struct {
+type BuildHistoryEvent struct {
+ Type BuildHistoryEventType `protobuf:"varint,1,opt,name=type,proto3,enum=moby.buildkit.v1.BuildHistoryEventType" json:"type,omitempty"`
+ Record *BuildHistoryRecord `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (*UnimplementedControlServer) DiskUsage(ctx context.Context, req *DiskUsageRequest) (*DiskUsageResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DiskUsage not implemented")
+func (m *BuildHistoryEvent) Reset() { *m = BuildHistoryEvent{} }
+func (m *BuildHistoryEvent) String() string { return proto.CompactTextString(m) }
+func (*BuildHistoryEvent) ProtoMessage() {}
+func (*BuildHistoryEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{20}
}
-func (*UnimplementedControlServer) Prune(req *PruneRequest, srv Control_PruneServer) error {
- return status.Errorf(codes.Unimplemented, "method Prune not implemented")
+func (m *BuildHistoryEvent) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
}
-func (*UnimplementedControlServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented")
+func (m *BuildHistoryEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BuildHistoryEvent.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
}
-func (*UnimplementedControlServer) Status(req *StatusRequest, srv Control_StatusServer) error {
- return status.Errorf(codes.Unimplemented, "method Status not implemented")
+func (m *BuildHistoryEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildHistoryEvent.Merge(m, src)
}
-func (*UnimplementedControlServer) Session(srv Control_SessionServer) error {
- return status.Errorf(codes.Unimplemented, "method Session not implemented")
+func (m *BuildHistoryEvent) XXX_Size() int {
+ return m.Size()
}
-func (*UnimplementedControlServer) ListWorkers(ctx context.Context, req *ListWorkersRequest) (*ListWorkersResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListWorkers not implemented")
+func (m *BuildHistoryEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildHistoryEvent.DiscardUnknown(m)
}
-func RegisterControlServer(s *grpc.Server, srv ControlServer) {
- s.RegisterService(&_Control_serviceDesc, srv)
+var xxx_messageInfo_BuildHistoryEvent proto.InternalMessageInfo
+
+func (m *BuildHistoryEvent) GetType() BuildHistoryEventType {
+ if m != nil {
+ return m.Type
+ }
+ return BuildHistoryEventType_STARTED
}
-func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DiskUsageRequest)
- if err := dec(in); err != nil {
- return nil, err
+func (m *BuildHistoryEvent) GetRecord() *BuildHistoryRecord {
+ if m != nil {
+ return m.Record
}
- if interceptor == nil {
- return srv.(ControlServer).DiskUsage(ctx, in)
+ return nil
+}
+
+type BuildHistoryRecord struct {
+ Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
+ Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"`
+ FrontendAttrs map[string]string `protobuf:"bytes,3,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Exporters []*Exporter `protobuf:"bytes,4,rep,name=Exporters,proto3" json:"Exporters,omitempty"`
+ Error *rpc.Status `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
+ CreatedAt *time.Time `protobuf:"bytes,6,opt,name=CreatedAt,proto3,stdtime" json:"CreatedAt,omitempty"`
+ CompletedAt *time.Time `protobuf:"bytes,7,opt,name=CompletedAt,proto3,stdtime" json:"CompletedAt,omitempty"`
+ Logs *Descriptor `protobuf:"bytes,8,opt,name=logs,proto3" json:"logs,omitempty"`
+ ExporterResponse map[string]string `protobuf:"bytes,9,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Result *BuildResultInfo `protobuf:"bytes,10,opt,name=Result,proto3" json:"Result,omitempty"`
+ Results map[string]*BuildResultInfo `protobuf:"bytes,11,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Generation int32 `protobuf:"varint,12,opt,name=Generation,proto3" json:"Generation,omitempty"`
+ Trace *Descriptor `protobuf:"bytes,13,opt,name=trace,proto3" json:"trace,omitempty"`
+ Pinned bool `protobuf:"varint,14,opt,name=pinned,proto3" json:"pinned,omitempty"`
+ NumCachedSteps int32 `protobuf:"varint,15,opt,name=numCachedSteps,proto3" json:"numCachedSteps,omitempty"`
+ NumTotalSteps int32 `protobuf:"varint,16,opt,name=numTotalSteps,proto3" json:"numTotalSteps,omitempty"`
+ NumCompletedSteps int32 `protobuf:"varint,17,opt,name=numCompletedSteps,proto3" json:"numCompletedSteps,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BuildHistoryRecord) Reset() { *m = BuildHistoryRecord{} }
+func (m *BuildHistoryRecord) String() string { return proto.CompactTextString(m) }
+func (*BuildHistoryRecord) ProtoMessage() {}
+func (*BuildHistoryRecord) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{21}
+}
+func (m *BuildHistoryRecord) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildHistoryRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BuildHistoryRecord.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/moby.buildkit.v1.Control/DiskUsage",
+}
+func (m *BuildHistoryRecord) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildHistoryRecord.Merge(m, src)
+}
+func (m *BuildHistoryRecord) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildHistoryRecord) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildHistoryRecord.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildHistoryRecord proto.InternalMessageInfo
+
+func (m *BuildHistoryRecord) GetRef() string {
+ if m != nil {
+ return m.Ref
}
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest))
+ return ""
+}
+
+func (m *BuildHistoryRecord) GetFrontend() string {
+ if m != nil {
+ return m.Frontend
}
- return interceptor(ctx, in, info, handler)
+ return ""
}
-func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(PruneRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
+func (m *BuildHistoryRecord) GetFrontendAttrs() map[string]string {
+ if m != nil {
+ return m.FrontendAttrs
}
- return srv.(ControlServer).Prune(m, &controlPruneServer{stream})
+ return nil
}
-type Control_PruneServer interface {
- Send(*UsageRecord) error
- grpc.ServerStream
+func (m *BuildHistoryRecord) GetExporters() []*Exporter {
+ if m != nil {
+ return m.Exporters
+ }
+ return nil
}
-type controlPruneServer struct {
- grpc.ServerStream
+func (m *BuildHistoryRecord) GetError() *rpc.Status {
+ if m != nil {
+ return m.Error
+ }
+ return nil
}
-func (x *controlPruneServer) Send(m *UsageRecord) error {
- return x.ServerStream.SendMsg(m)
+func (m *BuildHistoryRecord) GetCreatedAt() *time.Time {
+ if m != nil {
+ return m.CreatedAt
+ }
+ return nil
}
-func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SolveRequest)
- if err := dec(in); err != nil {
- return nil, err
+func (m *BuildHistoryRecord) GetCompletedAt() *time.Time {
+ if m != nil {
+ return m.CompletedAt
}
- if interceptor == nil {
- return srv.(ControlServer).Solve(ctx, in)
+ return nil
+}
+
+func (m *BuildHistoryRecord) GetLogs() *Descriptor {
+ if m != nil {
+ return m.Logs
}
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/moby.buildkit.v1.Control/Solve",
+ return nil
+}
+
+func (m *BuildHistoryRecord) GetExporterResponse() map[string]string {
+ if m != nil {
+ return m.ExporterResponse
}
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServer).Solve(ctx, req.(*SolveRequest))
+ return nil
+}
+
+func (m *BuildHistoryRecord) GetResult() *BuildResultInfo {
+ if m != nil {
+ return m.Result
}
- return interceptor(ctx, in, info, handler)
+ return nil
}
-func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(StatusRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
+func (m *BuildHistoryRecord) GetResults() map[string]*BuildResultInfo {
+ if m != nil {
+ return m.Results
}
- return srv.(ControlServer).Status(m, &controlStatusServer{stream})
+ return nil
}
-type Control_StatusServer interface {
- Send(*StatusResponse) error
- grpc.ServerStream
+func (m *BuildHistoryRecord) GetGeneration() int32 {
+ if m != nil {
+ return m.Generation
+ }
+ return 0
}
-type controlStatusServer struct {
- grpc.ServerStream
+func (m *BuildHistoryRecord) GetTrace() *Descriptor {
+ if m != nil {
+ return m.Trace
+ }
+ return nil
}
-func (x *controlStatusServer) Send(m *StatusResponse) error {
- return x.ServerStream.SendMsg(m)
+func (m *BuildHistoryRecord) GetPinned() bool {
+ if m != nil {
+ return m.Pinned
+ }
+ return false
}
-func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(ControlServer).Session(&controlSessionServer{stream})
+func (m *BuildHistoryRecord) GetNumCachedSteps() int32 {
+ if m != nil {
+ return m.NumCachedSteps
+ }
+ return 0
}
-type Control_SessionServer interface {
- Send(*BytesMessage) error
- Recv() (*BytesMessage, error)
- grpc.ServerStream
+func (m *BuildHistoryRecord) GetNumTotalSteps() int32 {
+ if m != nil {
+ return m.NumTotalSteps
+ }
+ return 0
}
-type controlSessionServer struct {
- grpc.ServerStream
+func (m *BuildHistoryRecord) GetNumCompletedSteps() int32 {
+ if m != nil {
+ return m.NumCompletedSteps
+ }
+ return 0
}
-func (x *controlSessionServer) Send(m *BytesMessage) error {
- return x.ServerStream.SendMsg(m)
+type UpdateBuildHistoryRequest struct {
+ Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
+ Pinned bool `protobuf:"varint,2,opt,name=Pinned,proto3" json:"Pinned,omitempty"`
+ Delete bool `protobuf:"varint,3,opt,name=Delete,proto3" json:"Delete,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (x *controlSessionServer) Recv() (*BytesMessage, error) {
- m := new(BytesMessage)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
+func (m *UpdateBuildHistoryRequest) Reset() { *m = UpdateBuildHistoryRequest{} }
+func (m *UpdateBuildHistoryRequest) String() string { return proto.CompactTextString(m) }
+func (*UpdateBuildHistoryRequest) ProtoMessage() {}
+func (*UpdateBuildHistoryRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{22}
+}
+func (m *UpdateBuildHistoryRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UpdateBuildHistoryRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_UpdateBuildHistoryRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- return m, nil
+}
+func (m *UpdateBuildHistoryRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateBuildHistoryRequest.Merge(m, src)
+}
+func (m *UpdateBuildHistoryRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *UpdateBuildHistoryRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateBuildHistoryRequest.DiscardUnknown(m)
}
-func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListWorkersRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ControlServer).ListWorkers(ctx, in)
+var xxx_messageInfo_UpdateBuildHistoryRequest proto.InternalMessageInfo
+
+func (m *UpdateBuildHistoryRequest) GetRef() string {
+ if m != nil {
+ return m.Ref
}
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/moby.buildkit.v1.Control/ListWorkers",
+ return ""
+}
+
+func (m *UpdateBuildHistoryRequest) GetPinned() bool {
+ if m != nil {
+ return m.Pinned
}
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest))
+ return false
+}
+
+func (m *UpdateBuildHistoryRequest) GetDelete() bool {
+ if m != nil {
+ return m.Delete
}
- return interceptor(ctx, in, info, handler)
+ return false
}
-var _Control_serviceDesc = grpc.ServiceDesc{
- ServiceName: "moby.buildkit.v1.Control",
- HandlerType: (*ControlServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "DiskUsage",
- Handler: _Control_DiskUsage_Handler,
- },
- {
- MethodName: "Solve",
- Handler: _Control_Solve_Handler,
- },
- {
- MethodName: "ListWorkers",
- Handler: _Control_ListWorkers_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Prune",
- Handler: _Control_Prune_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "Status",
- Handler: _Control_Status_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "Session",
- Handler: _Control_Session_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "control.proto",
+type UpdateBuildHistoryResponse struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *PruneRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (m *UpdateBuildHistoryResponse) Reset() { *m = UpdateBuildHistoryResponse{} }
+func (m *UpdateBuildHistoryResponse) String() string { return proto.CompactTextString(m) }
+func (*UpdateBuildHistoryResponse) ProtoMessage() {}
+func (*UpdateBuildHistoryResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{23}
+}
+func (m *UpdateBuildHistoryResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UpdateBuildHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_UpdateBuildHistoryResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- return dAtA[:n], nil
}
-
-func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+func (m *UpdateBuildHistoryResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdateBuildHistoryResponse.Merge(m, src)
+}
+func (m *UpdateBuildHistoryResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *UpdateBuildHistoryResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdateBuildHistoryResponse.DiscardUnknown(m)
}
-func (m *PruneRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
+var xxx_messageInfo_UpdateBuildHistoryResponse proto.InternalMessageInfo
+
+type Descriptor struct {
+ MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+ Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+ Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+ Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Descriptor) Reset() { *m = Descriptor{} }
+func (m *Descriptor) String() string { return proto.CompactTextString(m) }
+func (*Descriptor) ProtoMessage() {}
+func (*Descriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{24}
+}
+func (m *Descriptor) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- if m.KeepBytes != 0 {
- i = encodeVarintControl(dAtA, i, uint64(m.KeepBytes))
- i--
- dAtA[i] = 0x20
+}
+func (m *Descriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Descriptor.Merge(m, src)
+}
+func (m *Descriptor) XXX_Size() int {
+ return m.Size()
+}
+func (m *Descriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_Descriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Descriptor proto.InternalMessageInfo
+
+func (m *Descriptor) GetMediaType() string {
+ if m != nil {
+ return m.MediaType
}
- if m.KeepDuration != 0 {
- i = encodeVarintControl(dAtA, i, uint64(m.KeepDuration))
- i--
- dAtA[i] = 0x18
+ return ""
+}
+
+func (m *Descriptor) GetSize_() int64 {
+ if m != nil {
+ return m.Size_
}
- if m.All {
- i--
- if m.All {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
+ return 0
+}
+
+func (m *Descriptor) GetAnnotations() map[string]string {
+ if m != nil {
+ return m.Annotations
}
- if len(m.Filter) > 0 {
- for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Filter[iNdEx])
- copy(dAtA[i:], m.Filter[iNdEx])
- i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx])))
- i--
- dAtA[i] = 0xa
+ return nil
+}
+
+type BuildResultInfo struct {
+ Result *Descriptor `protobuf:"bytes,1,opt,name=Result,proto3" json:"Result,omitempty"`
+ Attestations []*Descriptor `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BuildResultInfo) Reset() { *m = BuildResultInfo{} }
+func (m *BuildResultInfo) String() string { return proto.CompactTextString(m) }
+func (*BuildResultInfo) ProtoMessage() {}
+func (*BuildResultInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{25}
+}
+func (m *BuildResultInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildResultInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BuildResultInfo.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
}
+ return b[:n], nil
}
- return len(dAtA) - i, nil
+}
+func (m *BuildResultInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildResultInfo.Merge(m, src)
+}
+func (m *BuildResultInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildResultInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildResultInfo.DiscardUnknown(m)
}
-func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+var xxx_messageInfo_BuildResultInfo proto.InternalMessageInfo
+
+func (m *BuildResultInfo) GetResult() *Descriptor {
+ if m != nil {
+ return m.Result
}
- return dAtA[:n], nil
+ return nil
}
-func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+func (m *BuildResultInfo) GetAttestations() []*Descriptor {
+ if m != nil {
+ return m.Attestations
+ }
+ return nil
}
-func (m *DiskUsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Filter) > 0 {
- for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Filter[iNdEx])
- copy(dAtA[i:], m.Filter[iNdEx])
- i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx])))
- i--
- dAtA[i] = 0xa
+type Exporter struct {
+ Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"`
+ Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Exporter) Reset() { *m = Exporter{} }
+func (m *Exporter) String() string { return proto.CompactTextString(m) }
+func (*Exporter) ProtoMessage() {}
+func (*Exporter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0c5120591600887d, []int{26}
+}
+func (m *Exporter) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Exporter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Exporter.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
}
+ return b[:n], nil
}
- return len(dAtA) - i, nil
+}
+func (m *Exporter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Exporter.Merge(m, src)
+}
+func (m *Exporter) XXX_Size() int {
+ return m.Size()
+}
+func (m *Exporter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Exporter.DiscardUnknown(m)
}
-func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+var xxx_messageInfo_Exporter proto.InternalMessageInfo
+
+func (m *Exporter) GetType() string {
+ if m != nil {
+ return m.Type
}
- return dAtA[:n], nil
+ return ""
}
-func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+func (m *Exporter) GetAttrs() map[string]string {
+ if m != nil {
+ return m.Attrs
+ }
+ return nil
}
-func (m *DiskUsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Record) > 0 {
- for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
+func init() {
+ proto.RegisterEnum("moby.buildkit.v1.BuildHistoryEventType", BuildHistoryEventType_name, BuildHistoryEventType_value)
+ proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest")
+ proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest")
+ proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse")
+ proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord")
+ proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry")
+ proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry")
+ proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptions.ExportAttrsDeprecatedEntry")
+ proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.CacheOptionsEntry")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptionsEntry.AttrsEntry")
+ proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveResponse.ExporterResponseEntry")
+ proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest")
+ proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse")
+ proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex")
+ proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus")
+ proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog")
+ proto.RegisterType((*VertexWarning)(nil), "moby.buildkit.v1.VertexWarning")
+ proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage")
+ proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest")
+ proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse")
+ proto.RegisterType((*InfoRequest)(nil), "moby.buildkit.v1.InfoRequest")
+ proto.RegisterType((*InfoResponse)(nil), "moby.buildkit.v1.InfoResponse")
+ proto.RegisterType((*BuildHistoryRequest)(nil), "moby.buildkit.v1.BuildHistoryRequest")
+ proto.RegisterType((*BuildHistoryEvent)(nil), "moby.buildkit.v1.BuildHistoryEvent")
+ proto.RegisterType((*BuildHistoryRecord)(nil), "moby.buildkit.v1.BuildHistoryRecord")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.BuildHistoryRecord.ExporterResponseEntry")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.BuildHistoryRecord.FrontendAttrsEntry")
+ proto.RegisterMapType((map[string]*BuildResultInfo)(nil), "moby.buildkit.v1.BuildHistoryRecord.ResultsEntry")
+ proto.RegisterType((*UpdateBuildHistoryRequest)(nil), "moby.buildkit.v1.UpdateBuildHistoryRequest")
+ proto.RegisterType((*UpdateBuildHistoryResponse)(nil), "moby.buildkit.v1.UpdateBuildHistoryResponse")
+ proto.RegisterType((*Descriptor)(nil), "moby.buildkit.v1.Descriptor")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Descriptor.AnnotationsEntry")
+ proto.RegisterType((*BuildResultInfo)(nil), "moby.buildkit.v1.BuildResultInfo")
+ proto.RegisterType((*Exporter)(nil), "moby.buildkit.v1.Exporter")
+ proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.Exporter.AttrsEntry")
}
-func (m *UsageRecord) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
+func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) }
+
+var fileDescriptor_0c5120591600887d = []byte{
+ // 2261 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x59, 0xcd, 0x6e, 0x1b, 0xc9,
+ 0x11, 0xde, 0x21, 0x25, 0xfe, 0x14, 0x29, 0x59, 0x6a, 0x7b, 0x8d, 0xc9, 0xc4, 0x2b, 0xc9, 0xb3,
+ 0x76, 0x22, 0x38, 0xf6, 0x50, 0xcb, 0xac, 0x63, 0xaf, 0x9c, 0x38, 0x16, 0x45, 0x66, 0x2d, 0xc7,
+ 0x82, 0xb5, 0x2d, 0x79, 0x0d, 0x2c, 0xe0, 0x04, 0x23, 0xb2, 0x45, 0x0f, 0x34, 0x9c, 0x99, 0x74,
+ 0x37, 0xb5, 0xe6, 0x3e, 0x40, 0x80, 0xcd, 0x21, 0xc8, 0x25, 0xc8, 0x25, 0xf7, 0x9c, 0x72, 0xce,
+ 0x13, 0x04, 0xf0, 0x31, 0xe7, 0x3d, 0x38, 0x81, 0x1f, 0x20, 0xc8, 0x31, 0xb9, 0x05, 0xfd, 0x33,
+ 0xe4, 0x90, 0x33, 0x94, 0x28, 0xdb, 0x27, 0x76, 0x75, 0xd7, 0x57, 0x53, 0x55, 0x5d, 0x5d, 0x5d,
+ 0xd5, 0x84, 0x85, 0x76, 0x18, 0x70, 0x1a, 0xfa, 0x4e, 0x44, 0x43, 0x1e, 0xa2, 0xa5, 0x5e, 0x78,
+ 0x38, 0x70, 0x0e, 0xfb, 0x9e, 0xdf, 0x39, 0xf6, 0xb8, 0x73, 0xf2, 0x89, 0x75, 0xab, 0xeb, 0xf1,
+ 0x17, 0xfd, 0x43, 0xa7, 0x1d, 0xf6, 0x6a, 0xdd, 0xb0, 0x1b, 0xd6, 0x24, 0xe3, 0x61, 0xff, 0x48,
+ 0x52, 0x92, 0x90, 0x23, 0x25, 0xc0, 0x5a, 0xed, 0x86, 0x61, 0xd7, 0x27, 0x23, 0x2e, 0xee, 0xf5,
+ 0x08, 0xe3, 0x6e, 0x2f, 0xd2, 0x0c, 0x37, 0x13, 0xf2, 0xc4, 0xc7, 0x6a, 0xf1, 0xc7, 0x6a, 0x2c,
+ 0xf4, 0x4f, 0x08, 0xad, 0x45, 0x87, 0xb5, 0x30, 0x62, 0x9a, 0xbb, 0x36, 0x95, 0xdb, 0x8d, 0xbc,
+ 0x1a, 0x1f, 0x44, 0x84, 0xd5, 0xbe, 0x0e, 0xe9, 0x31, 0xa1, 0x1a, 0x50, 0x9f, 0x54, 0x57, 0xe9,
+ 0xe3, 0x46, 0x1e, 0xd3, 0xc3, 0x1a, 0x8d, 0xda, 0x35, 0xc6, 0x5d, 0xde, 0x8f, 0x3f, 0x72, 0xfb,
+ 0x14, 0x95, 0xfa, 0xb4, 0x4d, 0xa2, 0xd0, 0xf7, 0xda, 0x03, 0xa1, 0x98, 0x1a, 0x29, 0x98, 0xfd,
+ 0x5b, 0x03, 0xaa, 0x7b, 0xb4, 0x1f, 0x10, 0x4c, 0x7e, 0xd3, 0x27, 0x8c, 0xa3, 0xcb, 0x50, 0x38,
+ 0xf2, 0x7c, 0x4e, 0xa8, 0x69, 0xac, 0xe5, 0xd7, 0xcb, 0x58, 0x53, 0x68, 0x09, 0xf2, 0xae, 0xef,
+ 0x9b, 0xb9, 0x35, 0x63, 0xbd, 0x84, 0xc5, 0x10, 0xad, 0x43, 0xf5, 0x98, 0x90, 0xa8, 0xd9, 0xa7,
+ 0x2e, 0xf7, 0xc2, 0xc0, 0xcc, 0xaf, 0x19, 0xeb, 0xf9, 0xc6, 0xdc, 0xab, 0xd7, 0xab, 0x06, 0x1e,
+ 0x5b, 0x41, 0x36, 0x94, 0x05, 0xdd, 0x18, 0x70, 0xc2, 0xcc, 0xb9, 0x04, 0xdb, 0x68, 0xda, 0xbe,
+ 0x01, 0x4b, 0x4d, 0x8f, 0x1d, 0x3f, 0x65, 0x6e, 0xf7, 0x2c, 0x5d, 0xec, 0x47, 0xb0, 0x9c, 0xe0,
+ 0x65, 0x51, 0x18, 0x30, 0x82, 0x6e, 0x43, 0x81, 0x92, 0x76, 0x48, 0x3b, 0x92, 0xb9, 0x52, 0xff,
+ 0xc8, 0x99, 0x0c, 0x03, 0x47, 0x03, 0x04, 0x13, 0xd6, 0xcc, 0xf6, 0x9f, 0xf2, 0x50, 0x49, 0xcc,
+ 0xa3, 0x45, 0xc8, 0xed, 0x34, 0x4d, 0x63, 0xcd, 0x58, 0x2f, 0xe3, 0xdc, 0x4e, 0x13, 0x99, 0x50,
+ 0xdc, 0xed, 0x73, 0xf7, 0xd0, 0x27, 0xda, 0xf6, 0x98, 0x44, 0x97, 0x60, 0x7e, 0x27, 0x78, 0xca,
+ 0x88, 0x34, 0xbc, 0x84, 0x15, 0x81, 0x10, 0xcc, 0xed, 0x7b, 0xdf, 0x10, 0x65, 0x26, 0x96, 0x63,
+ 0x64, 0x41, 0x61, 0xcf, 0xa5, 0x24, 0xe0, 0xe6, 0xbc, 0x90, 0xdb, 0xc8, 0x99, 0x06, 0xd6, 0x33,
+ 0xa8, 0x01, 0xe5, 0x6d, 0x4a, 0x5c, 0x4e, 0x3a, 0x5b, 0xdc, 0x2c, 0xac, 0x19, 0xeb, 0x95, 0xba,
+ 0xe5, 0xa8, 0x4d, 0x76, 0xe2, 0xf8, 0x73, 0x0e, 0xe2, 0xf8, 0x6b, 0x94, 0x5e, 0xbd, 0x5e, 0xfd,
+ 0xe0, 0x0f, 0xff, 0x14, 0xbe, 0x1b, 0xc2, 0xd0, 0x03, 0x80, 0xc7, 0x2e, 0xe3, 0x4f, 0x99, 0x14,
+ 0x52, 0x3c, 0x53, 0xc8, 0x9c, 0x14, 0x90, 0xc0, 0xa0, 0x15, 0x00, 0xe9, 0x84, 0xed, 0xb0, 0x1f,
+ 0x70, 0xb3, 0x24, 0x75, 0x4f, 0xcc, 0xa0, 0x35, 0xa8, 0x34, 0x09, 0x6b, 0x53, 0x2f, 0x92, 0x5b,
+ 0x5d, 0x96, 0xee, 0x49, 0x4e, 0x09, 0x09, 0xca, 0x83, 0x07, 0x83, 0x88, 0x98, 0x20, 0x19, 0x12,
+ 0x33, 0x62, 0x2f, 0xf7, 0x5f, 0xb8, 0x94, 0x74, 0xcc, 0x8a, 0x74, 0x97, 0xa6, 0x84, 0x7f, 0x95,
+ 0x27, 0x98, 0x59, 0x95, 0x9b, 0x1c, 0x93, 0xf6, 0xef, 0x8a, 0x50, 0xdd, 0x17, 0xc7, 0x29, 0x0e,
+ 0x87, 0x25, 0xc8, 0x63, 0x72, 0xa4, 0xf7, 0x46, 0x0c, 0x91, 0x03, 0xd0, 0x24, 0x47, 0x5e, 0xe0,
+ 0x49, 0xad, 0x72, 0xd2, 0xf0, 0x45, 0x27, 0x3a, 0x74, 0x46, 0xb3, 0x38, 0xc1, 0x81, 0x2c, 0x28,
+ 0xb5, 0x5e, 0x46, 0x21, 0x15, 0x21, 0x95, 0x97, 0x62, 0x86, 0x34, 0x7a, 0x06, 0x0b, 0xf1, 0x78,
+ 0x8b, 0x73, 0x2a, 0x02, 0x55, 0x84, 0xd1, 0x27, 0xe9, 0x30, 0x4a, 0x2a, 0xe5, 0x8c, 0x61, 0x5a,
+ 0x01, 0xa7, 0x03, 0x3c, 0x2e, 0x47, 0x58, 0xb8, 0x4f, 0x18, 0x13, 0x1a, 0xca, 0xed, 0xc7, 0x31,
+ 0x29, 0xd4, 0xf9, 0x05, 0x0d, 0x03, 0x4e, 0x82, 0x8e, 0xdc, 0xfa, 0x32, 0x1e, 0xd2, 0x42, 0x9d,
+ 0x78, 0xac, 0xd4, 0x29, 0xce, 0xa4, 0xce, 0x18, 0x46, 0xab, 0x33, 0x36, 0x87, 0x36, 0x61, 0x7e,
+ 0xdb, 0x6d, 0xbf, 0x20, 0x72, 0x97, 0x2b, 0xf5, 0x95, 0xb4, 0x40, 0xb9, 0xfc, 0x44, 0x6e, 0x2b,
+ 0x93, 0x07, 0xf5, 0x03, 0xac, 0x20, 0xe8, 0x57, 0x50, 0x6d, 0x05, 0xdc, 0xe3, 0x3e, 0xe9, 0xc9,
+ 0x1d, 0x2b, 0x8b, 0x1d, 0x6b, 0x6c, 0x7e, 0xf7, 0x7a, 0xf5, 0x27, 0x53, 0xd3, 0x4f, 0x9f, 0x7b,
+ 0x7e, 0x8d, 0x24, 0x50, 0x4e, 0x42, 0x04, 0x1e, 0x93, 0x87, 0xbe, 0x82, 0xc5, 0x58, 0xd9, 0x9d,
+ 0x20, 0xea, 0x73, 0x66, 0x82, 0xb4, 0xba, 0x3e, 0xa3, 0xd5, 0x0a, 0xa4, 0xcc, 0x9e, 0x90, 0x24,
+ 0x9c, 0xbd, 0x13, 0x70, 0x42, 0x03, 0xd7, 0xd7, 0x21, 0x38, 0xa4, 0xd1, 0x8e, 0x88, 0x34, 0x91,
+ 0x25, 0xf7, 0x64, 0x6e, 0x34, 0xab, 0xd2, 0x35, 0xd7, 0xd3, 0x5f, 0x4d, 0xe6, 0x52, 0x47, 0x31,
+ 0xe3, 0x31, 0xa8, 0xf5, 0x00, 0x50, 0x3a, 0x24, 0x44, 0xe8, 0x1e, 0x93, 0x41, 0x1c, 0xba, 0xc7,
+ 0x64, 0x20, 0xb2, 0xc7, 0x89, 0xeb, 0xf7, 0x55, 0x56, 0x29, 0x63, 0x45, 0x6c, 0xe6, 0xee, 0x1a,
+ 0x42, 0x42, 0x7a, 0x17, 0xcf, 0x25, 0xe1, 0x0b, 0xb8, 0x98, 0xe1, 0x91, 0x0c, 0x11, 0xd7, 0x92,
+ 0x22, 0xd2, 0x47, 0x67, 0x24, 0xd2, 0xfe, 0x6b, 0x1e, 0xaa, 0xc9, 0xb8, 0x40, 0x1b, 0x70, 0x51,
+ 0xd9, 0x89, 0xc9, 0x51, 0x93, 0x44, 0x94, 0xb4, 0x45, 0x32, 0xd2, 0xc2, 0xb3, 0x96, 0x50, 0x1d,
+ 0x2e, 0xed, 0xf4, 0xf4, 0x34, 0x4b, 0x40, 0x72, 0xf2, 0xd8, 0x67, 0xae, 0xa1, 0x10, 0x3e, 0x54,
+ 0xa2, 0xa4, 0x27, 0x12, 0xa0, 0xbc, 0x8c, 0x8b, 0xcf, 0x4e, 0x0f, 0x5e, 0x27, 0x13, 0xab, 0xc2,
+ 0x23, 0x5b, 0x2e, 0xfa, 0x19, 0x14, 0xd5, 0x42, 0x7c, 0xfe, 0x3f, 0x3e, 0xfd, 0x13, 0x4a, 0x58,
+ 0x8c, 0x11, 0x70, 0x65, 0x07, 0x33, 0xe7, 0xcf, 0x01, 0xd7, 0x18, 0xeb, 0x21, 0x58, 0xd3, 0x55,
+ 0x3e, 0x4f, 0x08, 0xd8, 0x7f, 0x31, 0x60, 0x39, 0xf5, 0x21, 0x71, 0x39, 0xc9, 0xf4, 0xac, 0x44,
+ 0xc8, 0x31, 0x6a, 0xc2, 0xbc, 0x4a, 0x30, 0x39, 0xa9, 0xb0, 0x33, 0x83, 0xc2, 0x4e, 0x22, 0xbb,
+ 0x28, 0xb0, 0x75, 0x17, 0xe0, 0xed, 0x82, 0xd5, 0xfe, 0x9b, 0x01, 0x0b, 0xfa, 0x30, 0xeb, 0x9b,
+ 0xdc, 0x85, 0xa5, 0xf8, 0x08, 0xc5, 0x73, 0xfa, 0x4e, 0xbf, 0x3d, 0x35, 0x0f, 0x28, 0x36, 0x67,
+ 0x12, 0xa7, 0x74, 0x4c, 0x89, 0xb3, 0xb6, 0xe3, 0xb8, 0x9a, 0x60, 0x3d, 0x97, 0xe6, 0x57, 0x61,
+ 0x61, 0x5f, 0x96, 0x60, 0x53, 0x2f, 0x28, 0xfb, 0x3f, 0x06, 0x2c, 0xc6, 0x3c, 0xda, 0xba, 0x4f,
+ 0xa1, 0x74, 0x42, 0x28, 0x27, 0x2f, 0x09, 0xd3, 0x56, 0x99, 0x69, 0xab, 0xbe, 0x94, 0x1c, 0x78,
+ 0xc8, 0x89, 0x36, 0xa1, 0xa4, 0xca, 0x3d, 0x12, 0x6f, 0xd4, 0xca, 0x34, 0x94, 0xfe, 0xde, 0x90,
+ 0x1f, 0xd5, 0x60, 0xce, 0x0f, 0xbb, 0x4c, 0x9f, 0x99, 0xef, 0x4f, 0xc3, 0x3d, 0x0e, 0xbb, 0x58,
+ 0x32, 0xa2, 0x7b, 0x50, 0xfa, 0xda, 0xa5, 0x81, 0x17, 0x74, 0xe3, 0x53, 0xb0, 0x3a, 0x0d, 0xf4,
+ 0x4c, 0xf1, 0xe1, 0x21, 0x40, 0x14, 0x54, 0x05, 0xb5, 0x86, 0x1e, 0x41, 0xa1, 0xe3, 0x75, 0x09,
+ 0xe3, 0xca, 0x25, 0x8d, 0xba, 0xb8, 0x4b, 0xbe, 0x7b, 0xbd, 0x7a, 0x23, 0x71, 0x59, 0x84, 0x11,
+ 0x09, 0x44, 0xf9, 0xee, 0x7a, 0x01, 0xa1, 0xa2, 0xbc, 0xbd, 0xa5, 0x20, 0x4e, 0x53, 0xfe, 0x60,
+ 0x2d, 0x41, 0xc8, 0xf2, 0xd4, 0x95, 0x20, 0xf3, 0xc5, 0xdb, 0xc9, 0x52, 0x12, 0xc4, 0x31, 0x08,
+ 0xdc, 0x1e, 0xd1, 0x25, 0x80, 0x1c, 0x8b, 0xfa, 0xa4, 0x2d, 0xe2, 0xbc, 0x23, 0x2b, 0xb7, 0x12,
+ 0xd6, 0x14, 0xda, 0x84, 0x22, 0xe3, 0x2e, 0x15, 0x39, 0x67, 0x7e, 0xc6, 0xc2, 0x2a, 0x06, 0xa0,
+ 0xfb, 0x50, 0x6e, 0x87, 0xbd, 0xc8, 0x27, 0x02, 0x5d, 0x98, 0x11, 0x3d, 0x82, 0x88, 0xd0, 0x23,
+ 0x94, 0x86, 0x54, 0x96, 0x74, 0x65, 0xac, 0x08, 0x74, 0x07, 0x16, 0x22, 0x1a, 0x76, 0x29, 0x61,
+ 0xec, 0x73, 0x1a, 0xf6, 0x23, 0x7d, 0x91, 0x2f, 0x8b, 0xe4, 0xbd, 0x97, 0x5c, 0xc0, 0xe3, 0x7c,
+ 0xf6, 0xbf, 0x73, 0x50, 0x4d, 0x86, 0x48, 0xaa, 0xd6, 0x7d, 0x04, 0x05, 0x15, 0x70, 0x2a, 0xd6,
+ 0xdf, 0xce, 0xc7, 0x4a, 0x42, 0xa6, 0x8f, 0x4d, 0x28, 0xb6, 0xfb, 0x54, 0x16, 0xc2, 0xaa, 0x3c,
+ 0x8e, 0x49, 0x61, 0x29, 0x0f, 0xb9, 0xeb, 0x4b, 0x1f, 0xe7, 0xb1, 0x22, 0x44, 0x6d, 0x3c, 0xec,
+ 0xbc, 0xce, 0x57, 0x1b, 0x0f, 0x61, 0xc9, 0xfd, 0x2b, 0xbe, 0xd3, 0xfe, 0x95, 0xce, 0xbd, 0x7f,
+ 0xf6, 0xdf, 0x0d, 0x28, 0x0f, 0xcf, 0x56, 0xc2, 0xbb, 0xc6, 0x3b, 0x7b, 0x77, 0xcc, 0x33, 0xb9,
+ 0xb7, 0xf3, 0xcc, 0x65, 0x28, 0x30, 0x4e, 0x89, 0xdb, 0x53, 0x9d, 0x1b, 0xd6, 0x94, 0xc8, 0x62,
+ 0x3d, 0xd6, 0x95, 0x3b, 0x54, 0xc5, 0x62, 0x68, 0xff, 0xd7, 0x80, 0x85, 0xb1, 0xe3, 0xfe, 0x5e,
+ 0x6d, 0xb9, 0x04, 0xf3, 0x3e, 0x39, 0x21, 0xaa, 0xb7, 0xcc, 0x63, 0x45, 0x88, 0x59, 0xf6, 0x22,
+ 0xa4, 0x5c, 0x2a, 0x57, 0xc5, 0x8a, 0x10, 0x3a, 0x77, 0x08, 0x77, 0x3d, 0x5f, 0xe6, 0xa5, 0x2a,
+ 0xd6, 0x94, 0xd0, 0xb9, 0x4f, 0x7d, 0x5d, 0x5f, 0x8b, 0x21, 0xb2, 0x61, 0xce, 0x0b, 0x8e, 0x42,
+ 0x1d, 0x36, 0xb2, 0xb2, 0x51, 0x75, 0xda, 0x4e, 0x70, 0x14, 0x62, 0xb9, 0x86, 0xae, 0x42, 0x81,
+ 0xba, 0x41, 0x97, 0xc4, 0xc5, 0x75, 0x59, 0x70, 0x61, 0x31, 0x83, 0xf5, 0x82, 0x6d, 0x43, 0x55,
+ 0xf6, 0xa7, 0xbb, 0x84, 0x89, 0x6e, 0x48, 0x84, 0x75, 0xc7, 0xe5, 0xae, 0x34, 0xbb, 0x8a, 0xe5,
+ 0xd8, 0xbe, 0x09, 0xe8, 0xb1, 0xc7, 0xf8, 0x33, 0xd9, 0xc2, 0xb3, 0xb3, 0x9a, 0xd7, 0x7d, 0xb8,
+ 0x38, 0xc6, 0xad, 0xaf, 0x85, 0x9f, 0x4e, 0xb4, 0xaf, 0xd7, 0xd2, 0x19, 0x57, 0xbe, 0x14, 0x38,
+ 0x0a, 0x38, 0xd1, 0xc5, 0x2e, 0x40, 0x45, 0xda, 0xa5, 0xbe, 0x6d, 0xbb, 0x50, 0x55, 0xa4, 0x16,
+ 0xfe, 0x05, 0x5c, 0x88, 0x05, 0x7d, 0x49, 0xa8, 0x6c, 0x45, 0x0c, 0xe9, 0x97, 0x1f, 0x4e, 0xfb,
+ 0x4a, 0x63, 0x9c, 0x1d, 0x4f, 0xe2, 0x6d, 0x02, 0x17, 0x25, 0xcf, 0x43, 0x8f, 0xf1, 0x90, 0x0e,
+ 0x62, 0xab, 0x57, 0x00, 0xb6, 0xda, 0xdc, 0x3b, 0x21, 0x4f, 0x02, 0x5f, 0x5d, 0xa3, 0x25, 0x9c,
+ 0x98, 0x89, 0xaf, 0xc8, 0xdc, 0xa8, 0x87, 0xbb, 0x02, 0xe5, 0x96, 0x4b, 0xfd, 0x41, 0xeb, 0xa5,
+ 0xc7, 0x75, 0x2b, 0x3d, 0x9a, 0xb0, 0x7f, 0x6f, 0xc0, 0x72, 0xf2, 0x3b, 0xad, 0x13, 0x91, 0x2e,
+ 0xee, 0xc1, 0x1c, 0x8f, 0xeb, 0x98, 0xc5, 0x2c, 0x23, 0x52, 0x10, 0x51, 0xea, 0x60, 0x09, 0x4a,
+ 0x78, 0x5a, 0x1d, 0x9c, 0x6b, 0xa7, 0xc3, 0x27, 0x3c, 0xfd, 0xbf, 0x12, 0xa0, 0xf4, 0x72, 0x46,
+ 0x6f, 0x9a, 0x6c, 0xee, 0x72, 0x13, 0xcd, 0xdd, 0xf3, 0xc9, 0xe6, 0x4e, 0x5d, 0xcd, 0x77, 0x66,
+ 0xd1, 0x64, 0x86, 0x16, 0xef, 0x2e, 0x94, 0xe3, 0xea, 0x26, 0xbe, 0xc0, 0xad, 0xb4, 0xe8, 0x61,
+ 0x01, 0x34, 0x62, 0x46, 0xeb, 0xf1, 0x8d, 0xa3, 0xee, 0x3a, 0x14, 0xe7, 0x14, 0x1a, 0xb5, 0x1d,
+ 0x5d, 0x57, 0xe8, 0x5b, 0xe8, 0xfe, 0xf9, 0xde, 0x2d, 0xe6, 0x26, 0xdf, 0x2c, 0x1a, 0x50, 0xd9,
+ 0x8e, 0x13, 0xe5, 0x39, 0x1e, 0x2d, 0x92, 0x20, 0xb4, 0xa1, 0x0b, 0x1b, 0x95, 0x9a, 0xaf, 0xa4,
+ 0x4d, 0x8c, 0x1f, 0x28, 0x42, 0xaa, 0x2b, 0x9b, 0xa3, 0x8c, 0xd2, 0xb2, 0x2c, 0x1d, 0xb4, 0x39,
+ 0x93, 0xef, 0x67, 0xac, 0x2f, 0xd1, 0x67, 0x50, 0xc0, 0x84, 0xf5, 0x7d, 0x2e, 0x5f, 0x42, 0x2a,
+ 0xf5, 0xab, 0x53, 0xa4, 0x2b, 0x26, 0x79, 0x56, 0x35, 0x00, 0xfd, 0x12, 0x8a, 0x6a, 0xc4, 0xcc,
+ 0xca, 0xb4, 0x96, 0x3f, 0x43, 0x33, 0x8d, 0xd1, 0x0d, 0x85, 0xa6, 0xc4, 0x71, 0xfc, 0x9c, 0x04,
+ 0x44, 0xbf, 0xd0, 0x89, 0xb6, 0x76, 0x1e, 0x27, 0x66, 0x50, 0x1d, 0xe6, 0x39, 0x75, 0xdb, 0xc4,
+ 0x5c, 0x98, 0xc1, 0x85, 0x8a, 0x55, 0x24, 0xb6, 0xc8, 0x0b, 0x02, 0xd2, 0x31, 0x17, 0x55, 0xa5,
+ 0xa4, 0x28, 0xf4, 0x03, 0x58, 0x0c, 0xfa, 0x3d, 0xd9, 0x2c, 0x74, 0xf6, 0x39, 0x89, 0x98, 0x79,
+ 0x41, 0x7e, 0x6f, 0x62, 0x16, 0x5d, 0x83, 0x85, 0xa0, 0xdf, 0x3b, 0x10, 0x37, 0xbc, 0x62, 0x5b,
+ 0x92, 0x6c, 0xe3, 0x93, 0xe8, 0x26, 0x2c, 0x0b, 0x5c, 0xbc, 0xdb, 0x8a, 0x73, 0x59, 0x72, 0xa6,
+ 0x17, 0xde, 0x43, 0xcf, 0xfc, 0x3e, 0x3a, 0x02, 0xeb, 0x39, 0x54, 0x93, 0xfb, 0x90, 0x81, 0xbd,
+ 0x33, 0xde, 0x71, 0xcf, 0x10, 0x17, 0x89, 0x86, 0xe3, 0x39, 0x7c, 0xef, 0x69, 0xd4, 0x71, 0x39,
+ 0xc9, 0xca, 0xbc, 0xe9, 0x0c, 0x74, 0x19, 0x0a, 0x7b, 0x6a, 0xa3, 0xd4, 0xcb, 0xa5, 0xa6, 0xc4,
+ 0x7c, 0x93, 0x08, 0xe7, 0xe9, 0x74, 0xab, 0x29, 0xfb, 0x0a, 0x58, 0x59, 0xe2, 0x95, 0x33, 0xec,
+ 0x3f, 0xe7, 0x00, 0x46, 0xc1, 0x80, 0x3e, 0x02, 0xe8, 0x91, 0x8e, 0xe7, 0xfe, 0x9a, 0x8f, 0x1a,
+ 0xca, 0xb2, 0x9c, 0x91, 0x5d, 0xe5, 0xa8, 0xf4, 0xcf, 0xbd, 0x73, 0xe9, 0x8f, 0x60, 0x8e, 0x79,
+ 0xdf, 0x10, 0x5d, 0xa6, 0xc8, 0x31, 0x7a, 0x02, 0x15, 0x37, 0x08, 0x42, 0x2e, 0xc3, 0x38, 0x6e,
+ 0xb6, 0x6f, 0x9d, 0x16, 0xbe, 0xce, 0xd6, 0x88, 0x5f, 0x9d, 0x92, 0xa4, 0x04, 0xeb, 0x3e, 0x2c,
+ 0x4d, 0x32, 0x9c, 0xab, 0x19, 0xfc, 0xd6, 0x80, 0x0b, 0x13, 0x5b, 0x87, 0x3e, 0x1d, 0x66, 0x01,
+ 0x63, 0x86, 0xe3, 0x15, 0x27, 0x80, 0x07, 0x50, 0xdd, 0xe2, 0x5c, 0x64, 0x3d, 0x65, 0x9b, 0x6a,
+ 0xf7, 0x4e, 0xc7, 0x8e, 0x21, 0xec, 0x3f, 0x1a, 0xa3, 0x77, 0xce, 0xcc, 0x9e, 0xff, 0xde, 0x78,
+ 0xcf, 0x7f, 0x7d, 0xfa, 0xe5, 0xf0, 0x3e, 0x5b, 0xfd, 0x1b, 0x3f, 0x87, 0x0f, 0x33, 0x2f, 0x66,
+ 0x54, 0x81, 0xe2, 0xfe, 0xc1, 0x16, 0x3e, 0x68, 0x35, 0x97, 0x3e, 0x40, 0x55, 0x28, 0x6d, 0x3f,
+ 0xd9, 0xdd, 0x7b, 0xdc, 0x3a, 0x68, 0x2d, 0x19, 0x62, 0xa9, 0xd9, 0x12, 0xe3, 0xe6, 0x52, 0xae,
+ 0xfe, 0x6d, 0x01, 0x8a, 0xdb, 0xea, 0xbf, 0x1e, 0x74, 0x00, 0xe5, 0xe1, 0x9f, 0x00, 0xc8, 0xce,
+ 0xf0, 0xce, 0xc4, 0xbf, 0x09, 0xd6, 0xc7, 0xa7, 0xf2, 0xe8, 0xc4, 0xfd, 0x10, 0xe6, 0xe5, 0xdf,
+ 0x21, 0x28, 0xa3, 0xbd, 0x4e, 0xfe, 0x4f, 0x62, 0x9d, 0xfe, 0xf7, 0xc2, 0x86, 0x21, 0x24, 0xc9,
+ 0xb7, 0x89, 0x2c, 0x49, 0xc9, 0xc7, 0x4b, 0x6b, 0xf5, 0x8c, 0x47, 0x0d, 0xb4, 0x0b, 0x05, 0xdd,
+ 0xb0, 0x65, 0xb1, 0x26, 0x5f, 0x20, 0xac, 0xb5, 0xe9, 0x0c, 0x4a, 0xd8, 0x86, 0x81, 0x76, 0x87,
+ 0xef, 0xd1, 0x59, 0xaa, 0x25, 0xab, 0x5d, 0xeb, 0x8c, 0xf5, 0x75, 0x63, 0xc3, 0x40, 0x5f, 0x41,
+ 0x25, 0x51, 0xcf, 0xa2, 0x8c, 0x6a, 0x2a, 0x5d, 0x1c, 0x5b, 0xd7, 0xcf, 0xe0, 0xd2, 0x96, 0xb7,
+ 0x60, 0x4e, 0x1e, 0xa4, 0x0c, 0x67, 0x27, 0xca, 0xdd, 0x2c, 0x35, 0xc7, 0xca, 0xdf, 0x43, 0x55,
+ 0xa0, 0x93, 0x20, 0x19, 0x7d, 0xe8, 0xfa, 0x59, 0xf7, 0xea, 0xd4, 0xb0, 0x49, 0x05, 0xf1, 0x86,
+ 0x81, 0x42, 0x40, 0xe9, 0xe4, 0x89, 0x7e, 0x94, 0x11, 0x25, 0xd3, 0x32, 0xb8, 0x75, 0x73, 0x36,
+ 0x66, 0x65, 0x54, 0xa3, 0xfa, 0xea, 0xcd, 0x8a, 0xf1, 0x8f, 0x37, 0x2b, 0xc6, 0xbf, 0xde, 0xac,
+ 0x18, 0x87, 0x05, 0x59, 0x31, 0xfd, 0xf8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7c, 0xb8, 0xc3,
+ 0x68, 0x0b, 0x1d, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// ControlClient is the client API for Control service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ControlClient interface {
+ DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error)
+ Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error)
+ Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error)
+ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error)
+ Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error)
+ ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error)
+ Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error)
+ ListenBuildHistory(ctx context.Context, in *BuildHistoryRequest, opts ...grpc.CallOption) (Control_ListenBuildHistoryClient, error)
+ UpdateBuildHistory(ctx context.Context, in *UpdateBuildHistoryRequest, opts ...grpc.CallOption) (*UpdateBuildHistoryResponse, error)
+}
+
+type controlClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewControlClient(cc *grpc.ClientConn) ControlClient {
+ return &controlClient{cc}
+}
+
+func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) {
+ out := new(DiskUsageResponse)
+ err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, opts...)
if err != nil {
return nil, err
}
- return dAtA[:n], nil
-}
-
-func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+ return out, nil
}
-func (m *UsageRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
+func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[0], "/moby.buildkit.v1.Control/Prune", opts...)
+ if err != nil {
+ return nil, err
}
- if len(m.Parents) > 0 {
- for iNdEx := len(m.Parents) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Parents[iNdEx])
- copy(dAtA[i:], m.Parents[iNdEx])
- i = encodeVarintControl(dAtA, i, uint64(len(m.Parents[iNdEx])))
- i--
- dAtA[i] = 0x62
- }
+ x := &controlPruneClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
}
- if m.Shared {
- i--
- if m.Shared {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x58
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
}
- if len(m.RecordType) > 0 {
- i -= len(m.RecordType)
- copy(dAtA[i:], m.RecordType)
- i = encodeVarintControl(dAtA, i, uint64(len(m.RecordType)))
- i--
- dAtA[i] = 0x52
+ return x, nil
+}
+
+type Control_PruneClient interface {
+ Recv() (*UsageRecord, error)
+ grpc.ClientStream
+}
+
+type controlPruneClient struct {
+ grpc.ClientStream
+}
+
+func (x *controlPruneClient) Recv() (*UsageRecord, error) {
+ m := new(UsageRecord)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
}
- if len(m.Description) > 0 {
- i -= len(m.Description)
- copy(dAtA[i:], m.Description)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Description)))
- i--
- dAtA[i] = 0x4a
+ return m, nil
+}
+
+func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) {
+ out := new(SolveResponse)
+ err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, opts...)
+ if err != nil {
+ return nil, err
}
- if m.UsageCount != 0 {
- i = encodeVarintControl(dAtA, i, uint64(m.UsageCount))
- i--
- dAtA[i] = 0x40
+ return out, nil
+}
+
+func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[1], "/moby.buildkit.v1.Control/Status", opts...)
+ if err != nil {
+ return nil, err
}
- if m.LastUsedAt != nil {
- n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt):])
- if err1 != nil {
- return 0, err1
- }
- i -= n1
- i = encodeVarintControl(dAtA, i, uint64(n1))
- i--
- dAtA[i] = 0x3a
+ x := &controlStatusClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
}
- n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):])
- if err2 != nil {
- return 0, err2
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
}
- i -= n2
- i = encodeVarintControl(dAtA, i, uint64(n2))
- i--
- dAtA[i] = 0x32
- if len(m.Parent) > 0 {
- i -= len(m.Parent)
- copy(dAtA[i:], m.Parent)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Parent)))
- i--
- dAtA[i] = 0x2a
+ return x, nil
+}
+
+type Control_StatusClient interface {
+ Recv() (*StatusResponse, error)
+ grpc.ClientStream
+}
+
+type controlStatusClient struct {
+ grpc.ClientStream
+}
+
+func (x *controlStatusClient) Recv() (*StatusResponse, error) {
+ m := new(StatusResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
}
- if m.Size_ != 0 {
- i = encodeVarintControl(dAtA, i, uint64(m.Size_))
- i--
- dAtA[i] = 0x20
+ return m, nil
+}
+
+func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[2], "/moby.buildkit.v1.Control/Session", opts...)
+ if err != nil {
+ return nil, err
}
- if m.InUse {
- i--
- if m.InUse {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
+ x := &controlSessionClient{stream}
+ return x, nil
+}
+
+type Control_SessionClient interface {
+ Send(*BytesMessage) error
+ Recv() (*BytesMessage, error)
+ grpc.ClientStream
+}
+
+type controlSessionClient struct {
+ grpc.ClientStream
+}
+
+func (x *controlSessionClient) Send(m *BytesMessage) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *controlSessionClient) Recv() (*BytesMessage, error) {
+ m := new(BytesMessage)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
}
- if m.Mutable {
- i--
- if m.Mutable {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
+ return m, nil
+}
+
+func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) {
+ out := new(ListWorkersResponse)
+ err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, opts...)
+ if err != nil {
+ return nil, err
}
- if len(m.ID) > 0 {
- i -= len(m.ID)
- copy(dAtA[i:], m.ID)
- i = encodeVarintControl(dAtA, i, uint64(len(m.ID)))
- i--
- dAtA[i] = 0xa
+ return out, nil
+}
+
+func (c *controlClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) {
+ out := new(InfoResponse)
+ err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Info", in, out, opts...)
+ if err != nil {
+ return nil, err
}
- return len(dAtA) - i, nil
+ return out, nil
}
-func (m *SolveRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
+func (c *controlClient) ListenBuildHistory(ctx context.Context, in *BuildHistoryRequest, opts ...grpc.CallOption) (Control_ListenBuildHistoryClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[3], "/moby.buildkit.v1.Control/ListenBuildHistory", opts...)
if err != nil {
return nil, err
}
- return dAtA[:n], nil
+ x := &controlListenBuildHistoryClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
}
-func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+type Control_ListenBuildHistoryClient interface {
+ Recv() (*BuildHistoryEvent, error)
+ grpc.ClientStream
}
-func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
+type controlListenBuildHistoryClient struct {
+ grpc.ClientStream
+}
+
+func (x *controlListenBuildHistoryClient) Recv() (*BuildHistoryEvent, error) {
+ m := new(BuildHistoryEvent)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
}
- if len(m.FrontendInputs) > 0 {
- for k := range m.FrontendInputs {
- v := m.FrontendInputs[k]
- baseI := i
- if v != nil {
- {
- size, err := v.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- i -= len(k)
- copy(dAtA[i:], k)
- i = encodeVarintControl(dAtA, i, uint64(len(k)))
- i--
- dAtA[i] = 0xa
- i = encodeVarintControl(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x52
- }
+ return m, nil
+}
+
+func (c *controlClient) UpdateBuildHistory(ctx context.Context, in *UpdateBuildHistoryRequest, opts ...grpc.CallOption) (*UpdateBuildHistoryResponse, error) {
+ out := new(UpdateBuildHistoryResponse)
+ err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/UpdateBuildHistory", in, out, opts...)
+ if err != nil {
+ return nil, err
}
- if len(m.Entitlements) > 0 {
- for iNdEx := len(m.Entitlements) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Entitlements[iNdEx])
- copy(dAtA[i:], m.Entitlements[iNdEx])
- i = encodeVarintControl(dAtA, i, uint64(len(m.Entitlements[iNdEx])))
- i--
- dAtA[i] = 0x4a
- }
+ return out, nil
+}
+
+// ControlServer is the server API for Control service.
+type ControlServer interface {
+ DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error)
+ Prune(*PruneRequest, Control_PruneServer) error
+ Solve(context.Context, *SolveRequest) (*SolveResponse, error)
+ Status(*StatusRequest, Control_StatusServer) error
+ Session(Control_SessionServer) error
+ ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error)
+ Info(context.Context, *InfoRequest) (*InfoResponse, error)
+ ListenBuildHistory(*BuildHistoryRequest, Control_ListenBuildHistoryServer) error
+ UpdateBuildHistory(context.Context, *UpdateBuildHistoryRequest) (*UpdateBuildHistoryResponse, error)
+}
+
+// UnimplementedControlServer can be embedded to have forward compatible implementations.
+type UnimplementedControlServer struct {
+}
+
+func (*UnimplementedControlServer) DiskUsage(ctx context.Context, req *DiskUsageRequest) (*DiskUsageResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DiskUsage not implemented")
+}
+func (*UnimplementedControlServer) Prune(req *PruneRequest, srv Control_PruneServer) error {
+ return status.Errorf(codes.Unimplemented, "method Prune not implemented")
+}
+func (*UnimplementedControlServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented")
+}
+func (*UnimplementedControlServer) Status(req *StatusRequest, srv Control_StatusServer) error {
+ return status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (*UnimplementedControlServer) Session(srv Control_SessionServer) error {
+ return status.Errorf(codes.Unimplemented, "method Session not implemented")
+}
+func (*UnimplementedControlServer) ListWorkers(ctx context.Context, req *ListWorkersRequest) (*ListWorkersResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListWorkers not implemented")
+}
+func (*UnimplementedControlServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Info not implemented")
+}
+func (*UnimplementedControlServer) ListenBuildHistory(req *BuildHistoryRequest, srv Control_ListenBuildHistoryServer) error {
+ return status.Errorf(codes.Unimplemented, "method ListenBuildHistory not implemented")
+}
+func (*UnimplementedControlServer) UpdateBuildHistory(ctx context.Context, req *UpdateBuildHistoryRequest) (*UpdateBuildHistoryResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateBuildHistory not implemented")
+}
+
+func RegisterControlServer(s *grpc.Server, srv ControlServer) {
+ s.RegisterService(&_Control_serviceDesc, srv)
+}
+
+func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DiskUsageRequest)
+ if err := dec(in); err != nil {
+ return nil, err
}
- {
- size, err := m.Cache.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
+ if interceptor == nil {
+ return srv.(ControlServer).DiskUsage(ctx, in)
}
- i--
- dAtA[i] = 0x42
- if len(m.FrontendAttrs) > 0 {
- for k := range m.FrontendAttrs {
- v := m.FrontendAttrs[k]
- baseI := i
- i -= len(v)
- copy(dAtA[i:], v)
- i = encodeVarintControl(dAtA, i, uint64(len(v)))
- i--
- dAtA[i] = 0x12
- i -= len(k)
- copy(dAtA[i:], k)
- i = encodeVarintControl(dAtA, i, uint64(len(k)))
- i--
- dAtA[i] = 0xa
- i = encodeVarintControl(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x3a
- }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/moby.buildkit.v1.Control/DiskUsage",
}
- if len(m.Frontend) > 0 {
- i -= len(m.Frontend)
- copy(dAtA[i:], m.Frontend)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend)))
- i--
- dAtA[i] = 0x32
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest))
}
- if len(m.Session) > 0 {
- i -= len(m.Session)
- copy(dAtA[i:], m.Session)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Session)))
- i--
- dAtA[i] = 0x2a
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(PruneRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
}
- if len(m.ExporterAttrs) > 0 {
- for k := range m.ExporterAttrs {
- v := m.ExporterAttrs[k]
- baseI := i
- i -= len(v)
- copy(dAtA[i:], v)
- i = encodeVarintControl(dAtA, i, uint64(len(v)))
- i--
- dAtA[i] = 0x12
- i -= len(k)
- copy(dAtA[i:], k)
- i = encodeVarintControl(dAtA, i, uint64(len(k)))
- i--
- dAtA[i] = 0xa
- i = encodeVarintControl(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x22
- }
+ return srv.(ControlServer).Prune(m, &controlPruneServer{stream})
+}
+
+type Control_PruneServer interface {
+ Send(*UsageRecord) error
+ grpc.ServerStream
+}
+
+type controlPruneServer struct {
+ grpc.ServerStream
+}
+
+func (x *controlPruneServer) Send(m *UsageRecord) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SolveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
}
- if len(m.Exporter) > 0 {
- i -= len(m.Exporter)
- copy(dAtA[i:], m.Exporter)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter)))
- i--
- dAtA[i] = 0x1a
+ if interceptor == nil {
+ return srv.(ControlServer).Solve(ctx, in)
}
- if m.Definition != nil {
- {
- size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/moby.buildkit.v1.Control/Solve",
}
- if len(m.Ref) > 0 {
- i -= len(m.Ref)
- copy(dAtA[i:], m.Ref)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
- i--
- dAtA[i] = 0xa
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).Solve(ctx, req.(*SolveRequest))
}
- return len(dAtA) - i, nil
+ return interceptor(ctx, in, info, handler)
}
-func (m *CacheOptions) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(StatusRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
}
- return dAtA[:n], nil
+ return srv.(ControlServer).Status(m, &controlStatusServer{stream})
}
-func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+type Control_StatusServer interface {
+ Send(*StatusResponse) error
+ grpc.ServerStream
}
-func (m *CacheOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
+type controlStatusServer struct {
+ grpc.ServerStream
+}
+
+func (x *controlStatusServer) Send(m *StatusResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(ControlServer).Session(&controlSessionServer{stream})
+}
+
+type Control_SessionServer interface {
+ Send(*BytesMessage) error
+ Recv() (*BytesMessage, error)
+ grpc.ServerStream
+}
+
+type controlSessionServer struct {
+ grpc.ServerStream
+}
+
+func (x *controlSessionServer) Send(m *BytesMessage) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *controlSessionServer) Recv() (*BytesMessage, error) {
+ m := new(BytesMessage)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
}
- if len(m.Imports) > 0 {
- for iNdEx := len(m.Imports) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Imports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
+ return m, nil
+}
+
+func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListWorkersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
}
- if len(m.Exports) > 0 {
- for iNdEx := len(m.Exports) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Exports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
+ if interceptor == nil {
+ return srv.(ControlServer).ListWorkers(ctx, in)
}
- if len(m.ExportAttrsDeprecated) > 0 {
- for k := range m.ExportAttrsDeprecated {
- v := m.ExportAttrsDeprecated[k]
- baseI := i
- i -= len(v)
- copy(dAtA[i:], v)
- i = encodeVarintControl(dAtA, i, uint64(len(v)))
- i--
- dAtA[i] = 0x12
- i -= len(k)
- copy(dAtA[i:], k)
- i = encodeVarintControl(dAtA, i, uint64(len(k)))
- i--
- dAtA[i] = 0xa
- i = encodeVarintControl(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x1a
- }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/moby.buildkit.v1.Control/ListWorkers",
}
- if len(m.ImportRefsDeprecated) > 0 {
- for iNdEx := len(m.ImportRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ImportRefsDeprecated[iNdEx])
- copy(dAtA[i:], m.ImportRefsDeprecated[iNdEx])
- i = encodeVarintControl(dAtA, i, uint64(len(m.ImportRefsDeprecated[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest))
}
- if len(m.ExportRefDeprecated) > 0 {
- i -= len(m.ExportRefDeprecated)
- copy(dAtA[i:], m.ExportRefDeprecated)
- i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRefDeprecated)))
- i--
- dAtA[i] = 0xa
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Control_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(InfoRequest)
+ if err := dec(in); err != nil {
+ return nil, err
}
- return len(dAtA) - i, nil
+ if interceptor == nil {
+ return srv.(ControlServer).Info(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/moby.buildkit.v1.Control/Info",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).Info(ctx, req.(*InfoRequest))
+ }
+ return interceptor(ctx, in, info, handler)
}
-func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) {
+func _Control_ListenBuildHistory_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(BuildHistoryRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(ControlServer).ListenBuildHistory(m, &controlListenBuildHistoryServer{stream})
+}
+
+type Control_ListenBuildHistoryServer interface {
+ Send(*BuildHistoryEvent) error
+ grpc.ServerStream
+}
+
+type controlListenBuildHistoryServer struct {
+ grpc.ServerStream
+}
+
+func (x *controlListenBuildHistoryServer) Send(m *BuildHistoryEvent) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Control_UpdateBuildHistory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateBuildHistoryRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ControlServer).UpdateBuildHistory(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/moby.buildkit.v1.Control/UpdateBuildHistory",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ControlServer).UpdateBuildHistory(ctx, req.(*UpdateBuildHistoryRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Control_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "moby.buildkit.v1.Control",
+ HandlerType: (*ControlServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "DiskUsage",
+ Handler: _Control_DiskUsage_Handler,
+ },
+ {
+ MethodName: "Solve",
+ Handler: _Control_Solve_Handler,
+ },
+ {
+ MethodName: "ListWorkers",
+ Handler: _Control_ListWorkers_Handler,
+ },
+ {
+ MethodName: "Info",
+ Handler: _Control_Info_Handler,
+ },
+ {
+ MethodName: "UpdateBuildHistory",
+ Handler: _Control_UpdateBuildHistory_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Prune",
+ Handler: _Control_Prune_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "Status",
+ Handler: _Control_Status_Handler,
+ ServerStreams: true,
+ },
+ {
+ StreamName: "Session",
+ Handler: _Control_Session_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "ListenBuildHistory",
+ Handler: _Control_ListenBuildHistory_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "control.proto",
+}
+
+func (m *PruneRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2221,12 +2623,12 @@ func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) {
+func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *PruneRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2235,36 +2637,39 @@ func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Attrs) > 0 {
- for k := range m.Attrs {
- v := m.Attrs[k]
- baseI := i
- i -= len(v)
- copy(dAtA[i:], v)
- i = encodeVarintControl(dAtA, i, uint64(len(v)))
- i--
- dAtA[i] = 0x12
- i -= len(k)
- copy(dAtA[i:], k)
- i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ if m.KeepBytes != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.KeepBytes))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.KeepDuration != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.KeepDuration))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.All {
+ i--
+ if m.All {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Filter) > 0 {
+ for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filter[iNdEx])
+ copy(dAtA[i:], m.Filter[iNdEx])
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx])))
i--
dAtA[i] = 0xa
- i = encodeVarintControl(dAtA, i, uint64(baseI-i))
- i--
- dAtA[i] = 0x12
}
}
- if len(m.Type) > 0 {
- i -= len(m.Type)
- copy(dAtA[i:], m.Type)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Type)))
- i--
- dAtA[i] = 0xa
- }
return len(dAtA) - i, nil
}
-func (m *SolveResponse) Marshal() (dAtA []byte, err error) {
+func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2274,12 +2679,12 @@ func (m *SolveResponse) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) {
+func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DiskUsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2288,21 +2693,11 @@ func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.ExporterResponse) > 0 {
- for k := range m.ExporterResponse {
- v := m.ExporterResponse[k]
- baseI := i
- i -= len(v)
- copy(dAtA[i:], v)
- i = encodeVarintControl(dAtA, i, uint64(len(v)))
- i--
- dAtA[i] = 0x12
- i -= len(k)
- copy(dAtA[i:], k)
- i = encodeVarintControl(dAtA, i, uint64(len(k)))
- i--
- dAtA[i] = 0xa
- i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ if len(m.Filter) > 0 {
+ for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filter[iNdEx])
+ copy(dAtA[i:], m.Filter[iNdEx])
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx])))
i--
dAtA[i] = 0xa
}
@@ -2310,7 +2705,7 @@ func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
+func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2320,12 +2715,12 @@ func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
+func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *DiskUsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2334,17 +2729,24 @@ func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Ref) > 0 {
- i -= len(m.Ref)
- copy(dAtA[i:], m.Ref)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
- i--
- dAtA[i] = 0xa
+ if len(m.Record) > 0 {
+ for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
}
return len(dAtA) - i, nil
}
-func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+func (m *UsageRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2354,12 +2756,12 @@ func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *UsageRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2368,239 +2770,93 @@ func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Warnings) > 0 {
- for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Warnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
+ if len(m.Parents) > 0 {
+ for iNdEx := len(m.Parents) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Parents[iNdEx])
+ copy(dAtA[i:], m.Parents[iNdEx])
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Parents[iNdEx])))
i--
- dAtA[i] = 0x22
- }
- }
- if len(m.Logs) > 0 {
- for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Statuses) > 0 {
- for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Vertexes) > 0 {
- for iNdEx := len(m.Vertexes) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Vertexes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Vertex) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Vertex) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ProgressGroup != nil {
- {
- size, err := m.ProgressGroup.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- if len(m.Error) > 0 {
- i -= len(m.Error)
- copy(dAtA[i:], m.Error)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Error)))
- i--
- dAtA[i] = 0x3a
- }
- if m.Completed != nil {
- n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):])
- if err7 != nil {
- return 0, err7
- }
- i -= n7
- i = encodeVarintControl(dAtA, i, uint64(n7))
- i--
- dAtA[i] = 0x32
- }
- if m.Started != nil {
- n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):])
- if err8 != nil {
- return 0, err8
+ dAtA[i] = 0x62
}
- i -= n8
- i = encodeVarintControl(dAtA, i, uint64(n8))
- i--
- dAtA[i] = 0x2a
}
- if m.Cached {
+ if m.Shared {
i--
- if m.Cached {
+ if m.Shared {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i--
- dAtA[i] = 0x20
+ dAtA[i] = 0x58
}
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Name)))
+ if len(m.RecordType) > 0 {
+ i -= len(m.RecordType)
+ copy(dAtA[i:], m.RecordType)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.RecordType)))
i--
- dAtA[i] = 0x1a
- }
- if len(m.Inputs) > 0 {
- for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Inputs[iNdEx])
- copy(dAtA[i:], m.Inputs[iNdEx])
- i = encodeVarintControl(dAtA, i, uint64(len(m.Inputs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
+ dAtA[i] = 0x52
}
- if len(m.Digest) > 0 {
- i -= len(m.Digest)
- copy(dAtA[i:], m.Digest)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Digest)))
+ if len(m.Description) > 0 {
+ i -= len(m.Description)
+ copy(dAtA[i:], m.Description)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Description)))
i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *VertexStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
+ dAtA[i] = 0x4a
}
- if m.Completed != nil {
- n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):])
- if err9 != nil {
- return 0, err9
- }
- i -= n9
- i = encodeVarintControl(dAtA, i, uint64(n9))
+ if m.UsageCount != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.UsageCount))
i--
- dAtA[i] = 0x42
+ dAtA[i] = 0x40
}
- if m.Started != nil {
- n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):])
- if err10 != nil {
- return 0, err10
+ if m.LastUsedAt != nil {
+ n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt):])
+ if err1 != nil {
+ return 0, err1
}
- i -= n10
- i = encodeVarintControl(dAtA, i, uint64(n10))
+ i -= n1
+ i = encodeVarintControl(dAtA, i, uint64(n1))
i--
dAtA[i] = 0x3a
}
- n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
- if err11 != nil {
- return 0, err11
+ n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):])
+ if err2 != nil {
+ return 0, err2
}
- i -= n11
- i = encodeVarintControl(dAtA, i, uint64(n11))
+ i -= n2
+ i = encodeVarintControl(dAtA, i, uint64(n2))
i--
dAtA[i] = 0x32
- if m.Total != 0 {
- i = encodeVarintControl(dAtA, i, uint64(m.Total))
+ if len(m.Parent) > 0 {
+ i -= len(m.Parent)
+ copy(dAtA[i:], m.Parent)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Parent)))
i--
- dAtA[i] = 0x28
+ dAtA[i] = 0x2a
}
- if m.Current != 0 {
- i = encodeVarintControl(dAtA, i, uint64(m.Current))
+ if m.Size_ != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.Size_))
i--
dAtA[i] = 0x20
}
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Name)))
+ if m.InUse {
i--
- dAtA[i] = 0x1a
+ if m.InUse {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
}
- if len(m.Vertex) > 0 {
- i -= len(m.Vertex)
- copy(dAtA[i:], m.Vertex)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex)))
+ if m.Mutable {
i--
- dAtA[i] = 0x12
+ if m.Mutable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
}
if len(m.ID) > 0 {
i -= len(m.ID)
@@ -2612,7 +2868,7 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *VertexLog) Marshal() (dAtA []byte, err error) {
+func (m *SolveRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2622,12 +2878,12 @@ func (m *VertexLog) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) {
+func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2636,37 +2892,155 @@ func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Msg) > 0 {
- i -= len(m.Msg)
- copy(dAtA[i:], m.Msg)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Msg)))
+ if m.SourcePolicy != nil {
+ {
+ size, err := m.SourcePolicy.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
i--
- dAtA[i] = 0x22
+ dAtA[i] = 0x62
}
- if m.Stream != 0 {
- i = encodeVarintControl(dAtA, i, uint64(m.Stream))
+ if m.Internal {
i--
- dAtA[i] = 0x18
- }
- n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
- if err12 != nil {
- return 0, err12
- }
- i -= n12
- i = encodeVarintControl(dAtA, i, uint64(n12))
+ if m.Internal {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x58
+ }
+ if len(m.FrontendInputs) > 0 {
+ for k := range m.FrontendInputs {
+ v := m.FrontendInputs[k]
+ baseI := i
+ if v != nil {
+ {
+ size, err := v.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x52
+ }
+ }
+ if len(m.Entitlements) > 0 {
+ for iNdEx := len(m.Entitlements) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Entitlements[iNdEx])
+ copy(dAtA[i:], m.Entitlements[iNdEx])
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Entitlements[iNdEx])))
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ {
+ size, err := m.Cache.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
i--
- dAtA[i] = 0x12
- if len(m.Vertex) > 0 {
- i -= len(m.Vertex)
- copy(dAtA[i:], m.Vertex)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex)))
+ dAtA[i] = 0x42
+ if len(m.FrontendAttrs) > 0 {
+ for k := range m.FrontendAttrs {
+ v := m.FrontendAttrs[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if len(m.Frontend) > 0 {
+ i -= len(m.Frontend)
+ copy(dAtA[i:], m.Frontend)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.Session) > 0 {
+ i -= len(m.Session)
+ copy(dAtA[i:], m.Session)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Session)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.ExporterAttrs) > 0 {
+ for k := range m.ExporterAttrs {
+ v := m.ExporterAttrs[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Exporter) > 0 {
+ i -= len(m.Exporter)
+ copy(dAtA[i:], m.Exporter)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Definition != nil {
+ {
+ size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Ref) > 0 {
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *VertexWarning) Marshal() (dAtA []byte, err error) {
+func (m *CacheOptions) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2676,12 +3050,12 @@ func (m *VertexWarning) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *VertexWarning) MarshalTo(dAtA []byte) (int, error) {
+func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *CacheOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2690,10 +3064,10 @@ func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Ranges) > 0 {
- for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- {
+ if len(m.Imports) > 0 {
+ for iNdEx := len(m.Imports) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Imports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2701,60 +3075,62 @@ func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i = encodeVarintControl(dAtA, i, uint64(size))
}
i--
- dAtA[i] = 0x3a
+ dAtA[i] = 0x2a
}
}
- if m.Info != nil {
- {
- size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
+ if len(m.Exports) > 0 {
+ for iNdEx := len(m.Exports) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Exports[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
}
- i -= size
- i = encodeVarintControl(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if len(m.Url) > 0 {
- i -= len(m.Url)
- copy(dAtA[i:], m.Url)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Url)))
- i--
- dAtA[i] = 0x2a
- }
- if len(m.Detail) > 0 {
- for iNdEx := len(m.Detail) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Detail[iNdEx])
- copy(dAtA[i:], m.Detail[iNdEx])
- i = encodeVarintControl(dAtA, i, uint64(len(m.Detail[iNdEx])))
i--
dAtA[i] = 0x22
}
}
- if len(m.Short) > 0 {
- i -= len(m.Short)
- copy(dAtA[i:], m.Short)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Short)))
- i--
- dAtA[i] = 0x1a
+ if len(m.ExportAttrsDeprecated) > 0 {
+ for k := range m.ExportAttrsDeprecated {
+ v := m.ExportAttrsDeprecated[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
+ }
}
- if m.Level != 0 {
- i = encodeVarintControl(dAtA, i, uint64(m.Level))
- i--
- dAtA[i] = 0x10
+ if len(m.ImportRefsDeprecated) > 0 {
+ for iNdEx := len(m.ImportRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ImportRefsDeprecated[iNdEx])
+ copy(dAtA[i:], m.ImportRefsDeprecated[iNdEx])
+ i = encodeVarintControl(dAtA, i, uint64(len(m.ImportRefsDeprecated[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
}
- if len(m.Vertex) > 0 {
- i -= len(m.Vertex)
- copy(dAtA[i:], m.Vertex)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex)))
+ if len(m.ExportRefDeprecated) > 0 {
+ i -= len(m.ExportRefDeprecated)
+ copy(dAtA[i:], m.ExportRefDeprecated)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRefDeprecated)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
+func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2764,12 +3140,12 @@ func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
+func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2778,17 +3154,36 @@ func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Data) > 0 {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintControl(dAtA, i, uint64(len(m.Data)))
+ if len(m.Attrs) > 0 {
+ for k := range m.Attrs {
+ v := m.Attrs[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Type)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
-func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) {
+func (m *SolveResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2798,12 +3193,12 @@ func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) {
+func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2812,11 +3207,21 @@ func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Filter) > 0 {
- for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Filter[iNdEx])
- copy(dAtA[i:], m.Filter[iNdEx])
- i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx])))
+ if len(m.ExporterResponse) > 0 {
+ for k := range m.ExporterResponse {
+ v := m.ExporterResponse[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
i--
dAtA[i] = 0xa
}
@@ -2824,7 +3229,7 @@ func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) {
+func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2834,12 +3239,12 @@ func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) {
+func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
@@ -2848,10 +3253,86 @@ func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Record) > 0 {
- for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- {
+ if len(m.Ref) > 0 {
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Warnings) > 0 {
+ for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- {
{
- size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ size, err := m.Warnings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.Logs) > 0 {
+ for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Statuses) > 0 {
+ for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Vertexes) > 0 {
+ for iNdEx := len(m.Vertexes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Vertexes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
@@ -2865,554 +3346,3836 @@ func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
-func encodeVarintControl(dAtA []byte, offset int, v uint64) int {
- offset -= sovControl(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
+func (m *Vertex) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- dAtA[offset] = uint8(v)
- return base
+ return dAtA[:n], nil
}
-func (m *PruneRequest) Size() (n int) {
- if m == nil {
- return 0
- }
+
+func (m *Vertex) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Filter) > 0 {
- for _, s := range m.Filter {
- l = len(s)
- n += 1 + l + sovControl(uint64(l))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ProgressGroup != nil {
+ {
+ size, err := m.ProgressGroup.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x42
}
- if m.All {
- n += 2
+ if len(m.Error) > 0 {
+ i -= len(m.Error)
+ copy(dAtA[i:], m.Error)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Error)))
+ i--
+ dAtA[i] = 0x3a
}
- if m.KeepDuration != 0 {
- n += 1 + sovControl(uint64(m.KeepDuration))
+ if m.Completed != nil {
+ n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):])
+ if err8 != nil {
+ return 0, err8
+ }
+ i -= n8
+ i = encodeVarintControl(dAtA, i, uint64(n8))
+ i--
+ dAtA[i] = 0x32
}
- if m.KeepBytes != 0 {
- n += 1 + sovControl(uint64(m.KeepBytes))
+ if m.Started != nil {
+ n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):])
+ if err9 != nil {
+ return 0, err9
+ }
+ i -= n9
+ i = encodeVarintControl(dAtA, i, uint64(n9))
+ i--
+ dAtA[i] = 0x2a
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if m.Cached {
+ i--
+ if m.Cached {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
}
- return n
-}
-
-func (m *DiskUsageRequest) Size() (n int) {
- if m == nil {
- return 0
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
}
- var l int
- _ = l
- if len(m.Filter) > 0 {
- for _, s := range m.Filter {
- l = len(s)
- n += 1 + l + sovControl(uint64(l))
+ if len(m.Inputs) > 0 {
+ for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Inputs[iNdEx])
+ copy(dAtA[i:], m.Inputs[iNdEx])
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Inputs[iNdEx])))
+ i--
+ dAtA[i] = 0x12
}
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if len(m.Digest) > 0 {
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Digest)))
+ i--
+ dAtA[i] = 0xa
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *DiskUsageResponse) Size() (n int) {
- if m == nil {
- return 0
+func (m *VertexStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Record) > 0 {
- for _, e := range m.Record {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- }
if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *UsageRecord) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.ID)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if m.Mutable {
- n += 2
- }
- if m.InUse {
- n += 2
- }
- if m.Size_ != 0 {
- n += 1 + sovControl(uint64(m.Size_))
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- l = len(m.Parent)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if m.Completed != nil {
+ n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):])
+ if err10 != nil {
+ return 0, err10
+ }
+ i -= n10
+ i = encodeVarintControl(dAtA, i, uint64(n10))
+ i--
+ dAtA[i] = 0x42
}
- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
- n += 1 + l + sovControl(uint64(l))
- if m.LastUsedAt != nil {
- l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt)
- n += 1 + l + sovControl(uint64(l))
+ if m.Started != nil {
+ n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):])
+ if err11 != nil {
+ return 0, err11
+ }
+ i -= n11
+ i = encodeVarintControl(dAtA, i, uint64(n11))
+ i--
+ dAtA[i] = 0x3a
}
- if m.UsageCount != 0 {
- n += 1 + sovControl(uint64(m.UsageCount))
+ n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
+ if err12 != nil {
+ return 0, err12
}
- l = len(m.Description)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ i -= n12
+ i = encodeVarintControl(dAtA, i, uint64(n12))
+ i--
+ dAtA[i] = 0x32
+ if m.Total != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.Total))
+ i--
+ dAtA[i] = 0x28
}
- l = len(m.RecordType)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if m.Current != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.Current))
+ i--
+ dAtA[i] = 0x20
}
- if m.Shared {
- n += 2
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
}
- if len(m.Parents) > 0 {
- for _, s := range m.Parents {
- l = len(s)
- n += 1 + l + sovControl(uint64(l))
- }
+ if len(m.Vertex) > 0 {
+ i -= len(m.Vertex)
+ copy(dAtA[i:], m.Vertex)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if len(m.ID) > 0 {
+ i -= len(m.ID)
+ copy(dAtA[i:], m.ID)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.ID)))
+ i--
+ dAtA[i] = 0xa
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *SolveRequest) Size() (n int) {
- if m == nil {
- return 0
+func (m *VertexLog) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Ref)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if m.Definition != nil {
- l = m.Definition.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- l = len(m.Exporter)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if len(m.ExporterAttrs) > 0 {
- for k, v := range m.ExporterAttrs {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
- n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
- }
- }
- l = len(m.Session)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- l = len(m.Frontend)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.FrontendAttrs) > 0 {
- for k, v := range m.FrontendAttrs {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
- n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
- }
+ if len(m.Msg) > 0 {
+ i -= len(m.Msg)
+ copy(dAtA[i:], m.Msg)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Msg)))
+ i--
+ dAtA[i] = 0x22
}
- l = m.Cache.Size()
- n += 1 + l + sovControl(uint64(l))
- if len(m.Entitlements) > 0 {
- for _, s := range m.Entitlements {
- l = len(s)
- n += 1 + l + sovControl(uint64(l))
- }
+ if m.Stream != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.Stream))
+ i--
+ dAtA[i] = 0x18
}
- if len(m.FrontendInputs) > 0 {
- for k, v := range m.FrontendInputs {
- _ = k
- _ = v
- l = 0
- if v != nil {
- l = v.Size()
- l += 1 + sovControl(uint64(l))
- }
- mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l
- n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
- }
+ n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):])
+ if err13 != nil {
+ return 0, err13
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ i -= n13
+ i = encodeVarintControl(dAtA, i, uint64(n13))
+ i--
+ dAtA[i] = 0x12
+ if len(m.Vertex) > 0 {
+ i -= len(m.Vertex)
+ copy(dAtA[i:], m.Vertex)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex)))
+ i--
+ dAtA[i] = 0xa
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *CacheOptions) Size() (n int) {
- if m == nil {
- return 0
+func (m *VertexWarning) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *VertexWarning) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.ExportRefDeprecated)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.ImportRefsDeprecated) > 0 {
- for _, s := range m.ImportRefsDeprecated {
- l = len(s)
- n += 1 + l + sovControl(uint64(l))
+ if len(m.Ranges) > 0 {
+ for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
}
}
- if len(m.ExportAttrsDeprecated) > 0 {
- for k, v := range m.ExportAttrsDeprecated {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
- n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ if m.Info != nil {
+ {
+ size, err := m.Info.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0x32
}
- if len(m.Exports) > 0 {
- for _, e := range m.Exports {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
- }
+ if len(m.Url) > 0 {
+ i -= len(m.Url)
+ copy(dAtA[i:], m.Url)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Url)))
+ i--
+ dAtA[i] = 0x2a
}
- if len(m.Imports) > 0 {
- for _, e := range m.Imports {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
+ if len(m.Detail) > 0 {
+ for iNdEx := len(m.Detail) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Detail[iNdEx])
+ copy(dAtA[i:], m.Detail[iNdEx])
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Detail[iNdEx])))
+ i--
+ dAtA[i] = 0x22
}
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if len(m.Short) > 0 {
+ i -= len(m.Short)
+ copy(dAtA[i:], m.Short)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Short)))
+ i--
+ dAtA[i] = 0x1a
}
- return n
+ if m.Level != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.Level))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Vertex) > 0 {
+ i -= len(m.Vertex)
+ copy(dAtA[i:], m.Vertex)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
-func (m *CacheOptionsEntry) Size() (n int) {
- if m == nil {
- return 0
+func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Type)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if len(m.Attrs) > 0 {
- for k, v := range m.Attrs {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
- n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
- }
- }
if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return n
+ if len(m.Data) > 0 {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
}
-func (m *SolveResponse) Size() (n int) {
- if m == nil {
- return 0
+func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.ExporterResponse) > 0 {
- for k, v := range m.ExporterResponse {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
- n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
- }
- }
if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return n
+ if len(m.Filter) > 0 {
+ for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Filter[iNdEx])
+ copy(dAtA[i:], m.Filter[iNdEx])
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
}
-func (m *StatusRequest) Size() (n int) {
- if m == nil {
- return 0
+func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Ref)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return n
+ if len(m.Record) > 0 {
+ for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
}
-func (m *StatusResponse) Size() (n int) {
- if m == nil {
- return 0
+func (m *InfoRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Vertexes) > 0 {
- for _, e := range m.Vertexes {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- }
- if len(m.Statuses) > 0 {
- for _, e := range m.Statuses {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- }
- if len(m.Logs) > 0 {
- for _, e := range m.Logs {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- }
- if len(m.Warnings) > 0 {
- for _, e := range m.Warnings {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- }
if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *Vertex) Size() (n int) {
- if m == nil {
- return 0
+func (m *InfoResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Digest)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Inputs) > 0 {
- for _, s := range m.Inputs {
- l = len(s)
- n += 1 + l + sovControl(uint64(l))
+ if m.BuildkitVersion != nil {
+ {
+ size, err := m.BuildkitVersion.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
}
+ i--
+ dAtA[i] = 0xa
}
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if m.Cached {
- n += 2
- }
- if m.Started != nil {
- l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)
- n += 1 + l + sovControl(uint64(l))
- }
- if m.Completed != nil {
- l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed)
- n += 1 + l + sovControl(uint64(l))
- }
- l = len(m.Error)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if m.ProgressGroup != nil {
- l = m.ProgressGroup.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
+ return len(dAtA) - i, nil
}
-func (m *VertexStatus) Size() (n int) {
- if m == nil {
- return 0
+func (m *BuildHistoryRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *BuildHistoryRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildHistoryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.ID)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- l = len(m.Vertex)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if m.Current != 0 {
- n += 1 + sovControl(uint64(m.Current))
- }
- if m.Total != 0 {
- n += 1 + sovControl(uint64(m.Total))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
- n += 1 + l + sovControl(uint64(l))
- if m.Started != nil {
- l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)
- n += 1 + l + sovControl(uint64(l))
+ if m.EarlyExit {
+ i--
+ if m.EarlyExit {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
}
- if m.Completed != nil {
- l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed)
- n += 1 + l + sovControl(uint64(l))
+ if len(m.Ref) > 0 {
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if m.ActiveOnly {
+ i--
+ if m.ActiveOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *VertexLog) Size() (n int) {
- if m == nil {
- return 0
+func (m *BuildHistoryEvent) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *BuildHistoryEvent) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildHistoryEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Vertex)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
- n += 1 + l + sovControl(uint64(l))
- if m.Stream != 0 {
- n += 1 + sovControl(uint64(m.Stream))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- l = len(m.Msg)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if m.Record != nil {
+ {
+ size, err := m.Record.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if m.Type != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *VertexWarning) Size() (n int) {
- if m == nil {
- return 0
+func (m *BuildHistoryRecord) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *BuildHistoryRecord) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildHistoryRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- l = len(m.Vertex)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
- }
- if m.Level != 0 {
- n += 1 + sovControl(uint64(m.Level))
- }
- l = len(m.Short)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
}
- if len(m.Detail) > 0 {
- for _, b := range m.Detail {
- l = len(b)
- n += 1 + l + sovControl(uint64(l))
- }
+ if m.NumCompletedSteps != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.NumCompletedSteps))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x88
}
- l = len(m.Url)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if m.NumTotalSteps != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.NumTotalSteps))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x80
}
- if m.Info != nil {
- l = m.Info.Size()
- n += 1 + l + sovControl(uint64(l))
+ if m.NumCachedSteps != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.NumCachedSteps))
+ i--
+ dAtA[i] = 0x78
}
- if len(m.Ranges) > 0 {
- for _, e := range m.Ranges {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
+ if m.Pinned {
+ i--
+ if m.Pinned {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
}
+ i--
+ dAtA[i] = 0x70
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if m.Trace != nil {
+ {
+ size, err := m.Trace.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x6a
}
- return n
-}
-
-func (m *BytesMessage) Size() (n int) {
- if m == nil {
- return 0
+ if m.Generation != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.Generation))
+ i--
+ dAtA[i] = 0x60
}
- var l int
- _ = l
- l = len(m.Data)
- if l > 0 {
- n += 1 + l + sovControl(uint64(l))
+ if len(m.Results) > 0 {
+ for k := range m.Results {
+ v := m.Results[k]
+ baseI := i
+ if v != nil {
+ {
+ size, err := v.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x5a
+ }
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if m.Result != nil {
+ {
+ size, err := m.Result.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
}
- return n
-}
-
-func (m *ListWorkersRequest) Size() (n int) {
- if m == nil {
- return 0
+ if len(m.ExporterResponse) > 0 {
+ for k := range m.ExporterResponse {
+ v := m.ExporterResponse[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x4a
+ }
+ }
+ if m.Logs != nil {
+ {
+ size, err := m.Logs.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.CompletedAt != nil {
+ n21, err21 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CompletedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CompletedAt):])
+ if err21 != nil {
+ return 0, err21
+ }
+ i -= n21
+ i = encodeVarintControl(dAtA, i, uint64(n21))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.CreatedAt != nil {
+ n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.CreatedAt):])
+ if err22 != nil {
+ return 0, err22
+ }
+ i -= n22
+ i = encodeVarintControl(dAtA, i, uint64(n22))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.Error != nil {
+ {
+ size, err := m.Error.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.Exporters) > 0 {
+ for iNdEx := len(m.Exporters) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Exporters[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.FrontendAttrs) > 0 {
+ for k := range m.FrontendAttrs {
+ v := m.FrontendAttrs[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Frontend) > 0 {
+ i -= len(m.Frontend)
+ copy(dAtA[i:], m.Frontend)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Ref) > 0 {
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UpdateBuildHistoryRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *UpdateBuildHistoryRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateBuildHistoryRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Filter) > 0 {
- for _, s := range m.Filter {
- l = len(s)
- n += 1 + l + sovControl(uint64(l))
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Delete {
+ i--
+ if m.Delete {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Pinned {
+ i--
+ if m.Pinned {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Ref) > 0 {
+ i -= len(m.Ref)
+ copy(dAtA[i:], m.Ref)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Ref)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UpdateBuildHistoryResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UpdateBuildHistoryResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UpdateBuildHistoryResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Descriptor) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Descriptor) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Annotations) > 0 {
+ for k := range m.Annotations {
+ v := m.Annotations[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.Size_ != 0 {
+ i = encodeVarintControl(dAtA, i, uint64(m.Size_))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Digest) > 0 {
+ i -= len(m.Digest)
+ copy(dAtA[i:], m.Digest)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Digest)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.MediaType) > 0 {
+ i -= len(m.MediaType)
+ copy(dAtA[i:], m.MediaType)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.MediaType)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BuildResultInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildResultInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildResultInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Attestations) > 0 {
+ for iNdEx := len(m.Attestations) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Attestations[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Result != nil {
+ {
+ size, err := m.Result.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintControl(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Exporter) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Exporter) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Exporter) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Attrs) > 0 {
+ for k := range m.Attrs {
+ v := m.Attrs[k]
+ baseI := i
+ i -= len(v)
+ copy(dAtA[i:], v)
+ i = encodeVarintControl(dAtA, i, uint64(len(v)))
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = encodeVarintControl(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = encodeVarintControl(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = encodeVarintControl(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintControl(dAtA []byte, offset int, v uint64) int {
+ offset -= sovControl(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *PruneRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Filter) > 0 {
+ for _, s := range m.Filter {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.All {
+ n += 2
+ }
+ if m.KeepDuration != 0 {
+ n += 1 + sovControl(uint64(m.KeepDuration))
+ }
+ if m.KeepBytes != 0 {
+ n += 1 + sovControl(uint64(m.KeepBytes))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DiskUsageRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Filter) > 0 {
+ for _, s := range m.Filter {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DiskUsageResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Record) > 0 {
+ for _, e := range m.Record {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *UsageRecord) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Mutable {
+ n += 2
+ }
+ if m.InUse {
+ n += 2
+ }
+ if m.Size_ != 0 {
+ n += 1 + sovControl(uint64(m.Size_))
+ }
+ l = len(m.Parent)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
+ n += 1 + l + sovControl(uint64(l))
+ if m.LastUsedAt != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.UsageCount != 0 {
+ n += 1 + sovControl(uint64(m.UsageCount))
+ }
+ l = len(m.Description)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.RecordType)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Shared {
+ n += 2
+ }
+ if len(m.Parents) > 0 {
+ for _, s := range m.Parents {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SolveRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Ref)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Definition != nil {
+ l = m.Definition.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.Exporter)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.ExporterAttrs) > 0 {
+ for k, v := range m.ExporterAttrs {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ l = len(m.Session)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.Frontend)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.FrontendAttrs) > 0 {
+ for k, v := range m.FrontendAttrs {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ l = m.Cache.Size()
+ n += 1 + l + sovControl(uint64(l))
+ if len(m.Entitlements) > 0 {
+ for _, s := range m.Entitlements {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.FrontendInputs) > 0 {
+ for k, v := range m.FrontendInputs {
+ _ = k
+ _ = v
+ l = 0
+ if v != nil {
+ l = v.Size()
+ l += 1 + sovControl(uint64(l))
+ }
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if m.Internal {
+ n += 2
+ }
+ if m.SourcePolicy != nil {
+ l = m.SourcePolicy.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *CacheOptions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ExportRefDeprecated)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.ImportRefsDeprecated) > 0 {
+ for _, s := range m.ImportRefsDeprecated {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.ExportAttrsDeprecated) > 0 {
+ for k, v := range m.ExportAttrsDeprecated {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Exports) > 0 {
+ for _, e := range m.Exports {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Imports) > 0 {
+ for _, e := range m.Imports {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *CacheOptionsEntry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.Attrs) > 0 {
+ for k, v := range m.Attrs {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SolveResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ExporterResponse) > 0 {
+ for k, v := range m.ExporterResponse {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *StatusRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Ref)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *StatusResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Vertexes) > 0 {
+ for _, e := range m.Vertexes {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Statuses) > 0 {
+ for _, e := range m.Statuses {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Logs) > 0 {
+ for _, e := range m.Logs {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if len(m.Warnings) > 0 {
+ for _, e := range m.Warnings {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Vertex) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Digest)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.Inputs) > 0 {
+ for _, s := range m.Inputs {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Cached {
+ n += 2
+ }
+ if m.Started != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Completed != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.Error)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.ProgressGroup != nil {
+ l = m.ProgressGroup.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *VertexStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ID)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.Vertex)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Current != 0 {
+ n += 1 + sovControl(uint64(m.Current))
+ }
+ if m.Total != 0 {
+ n += 1 + sovControl(uint64(m.Total))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
+ n += 1 + l + sovControl(uint64(l))
+ if m.Started != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Completed != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *VertexLog) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Vertex)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
+ n += 1 + l + sovControl(uint64(l))
+ if m.Stream != 0 {
+ n += 1 + sovControl(uint64(m.Stream))
+ }
+ l = len(m.Msg)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *VertexWarning) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Vertex)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Level != 0 {
+ n += 1 + sovControl(uint64(m.Level))
+ }
+ l = len(m.Short)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.Detail) > 0 {
+ for _, b := range m.Detail {
+ l = len(b)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ l = len(m.Url)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Info != nil {
+ l = m.Info.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.Ranges) > 0 {
+ for _, e := range m.Ranges {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *BytesMessage) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Data)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ListWorkersRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Filter) > 0 {
+ for _, s := range m.Filter {
+ l = len(s)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ListWorkersResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Record) > 0 {
+ for _, e := range m.Record {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *InfoRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *InfoResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.BuildkitVersion != nil {
+ l = m.BuildkitVersion.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *BuildHistoryRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ActiveOnly {
+ n += 2
+ }
+ l = len(m.Ref)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.EarlyExit {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *BuildHistoryEvent) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovControl(uint64(m.Type))
+ }
+ if m.Record != nil {
+ l = m.Record.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *BuildHistoryRecord) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Ref)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.Frontend)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.FrontendAttrs) > 0 {
+ for k, v := range m.FrontendAttrs {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if len(m.Exporters) > 0 {
+ for _, e := range m.Exporters {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.Error != nil {
+ l = m.Error.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.CreatedAt != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.CreatedAt)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.CompletedAt != nil {
+ l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.CompletedAt)
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Logs != nil {
+ l = m.Logs.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.ExporterResponse) > 0 {
+ for k, v := range m.ExporterResponse {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if m.Result != nil {
+ l = m.Result.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.Results) > 0 {
+ for k, v := range m.Results {
+ _ = k
+ _ = v
+ l = 0
+ if v != nil {
+ l = v.Size()
+ l += 1 + sovControl(uint64(l))
+ }
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if m.Generation != 0 {
+ n += 1 + sovControl(uint64(m.Generation))
+ }
+ if m.Trace != nil {
+ l = m.Trace.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Pinned {
+ n += 2
+ }
+ if m.NumCachedSteps != 0 {
+ n += 1 + sovControl(uint64(m.NumCachedSteps))
+ }
+ if m.NumTotalSteps != 0 {
+ n += 2 + sovControl(uint64(m.NumTotalSteps))
+ }
+ if m.NumCompletedSteps != 0 {
+ n += 2 + sovControl(uint64(m.NumCompletedSteps))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *UpdateBuildHistoryRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Ref)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Pinned {
+ n += 2
+ }
+ if m.Delete {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *UpdateBuildHistoryResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Descriptor) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.MediaType)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ l = len(m.Digest)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if m.Size_ != 0 {
+ n += 1 + sovControl(uint64(m.Size_))
+ }
+ if len(m.Annotations) > 0 {
+ for k, v := range m.Annotations {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *BuildResultInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Result != nil {
+ l = m.Result.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.Attestations) > 0 {
+ for _, e := range m.Attestations {
+ l = e.Size()
+ n += 1 + l + sovControl(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Exporter) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ if l > 0 {
+ n += 1 + l + sovControl(uint64(l))
+ }
+ if len(m.Attrs) > 0 {
+ for k, v := range m.Attrs {
+ _ = k
+ _ = v
+ mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v)))
+ n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovControl(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozControl(x uint64) (n int) {
+ return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *PruneRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field All", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.All = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType)
+ }
+ m.KeepDuration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.KeepDuration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType)
+ }
+ m.KeepBytes = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.KeepBytes |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Record = append(m.Record, &UsageRecord{})
+ if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UsageRecord) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Mutable = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.InUse = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+ }
+ m.Size_ = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Size_ |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Parent = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LastUsedAt == nil {
+ m.LastUsedAt = new(time.Time)
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType)
+ }
+ m.UsageCount = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UsageCount |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Description = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RecordType", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RecordType = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Shared = bool(v != 0)
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Parents", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Parents = append(m.Parents, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SolveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ref = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Definition == nil {
+ m.Definition = &pb.Definition{}
+ }
+ if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Exporter = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExporterAttrs == nil {
+ m.ExporterAttrs = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.ExporterAttrs[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Session = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Frontend = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FrontendAttrs == nil {
+ m.FrontendAttrs = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.FrontendAttrs[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Entitlements", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Entitlements = append(m.Entitlements, github_com_moby_buildkit_util_entitlements.Entitlement(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.FrontendInputs == nil {
+ m.FrontendInputs = make(map[string]*pb.Definition)
+ }
+ var mapkey string
+ var mapvalue *pb.Definition
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &pb.Definition{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.FrontendInputs[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Internal = bool(v != 0)
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SourcePolicy", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SourcePolicy == nil {
+ m.SourcePolicy = &pb1.Policy{}
+ }
+ if err := m.SourcePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CacheOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExportRefDeprecated", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ExportRefDeprecated = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ImportRefsDeprecated", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ImportRefsDeprecated = append(m.ImportRefsDeprecated, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExportAttrsDeprecated", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ExportAttrsDeprecated == nil {
+ m.ExportAttrsDeprecated = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.ExportAttrsDeprecated[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Exports = append(m.Exports, &CacheOptionsEntry{})
+ if err := m.Exports[len(m.Exports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Imports", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Imports = append(m.Imports, &CacheOptionsEntry{})
+ if err := m.Imports[len(m.Imports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Type = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Attrs == nil {
+ m.Attrs = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Attrs[mapkey] = mapvalue
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
}
}
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-func (m *ListWorkersResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Record) > 0 {
- for _, e := range m.Record {
- l = e.Size()
- n += 1 + l + sovControl(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
}
- return n
-}
-
-func sovControl(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozControl(x uint64) (n int) {
- return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+ return nil
}
-func (m *PruneRequest) Unmarshal(dAtA []byte) error {
+func (m *SolveResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3435,17 +7198,17 @@ func (m *PruneRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -3455,82 +7218,119 @@ func (m *PruneRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field All", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.All = bool(v != 0)
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType)
- }
- m.KeepDuration = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.KeepDuration |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType)
+ if m.ExporterResponse == nil {
+ m.ExporterResponse = make(map[string]string)
}
- m.KeepBytes = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- b := dAtA[iNdEx]
- iNdEx++
- m.KeepBytes |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
}
+ m.ExporterResponse[mapkey] = mapvalue
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:])
@@ -3553,7 +7353,7 @@ func (m *PruneRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error {
+func (m *StatusRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3576,15 +7376,15 @@ func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -3612,7 +7412,7 @@ func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
+ m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -3636,7 +7436,7 @@ func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error {
+func (m *StatusResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3659,15 +7459,15 @@ func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group")
+ return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3694,8 +7494,110 @@ func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Record = append(m.Record, &UsageRecord{})
- if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Vertexes = append(m.Vertexes, &Vertex{})
+ if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Statuses = append(m.Statuses, &VertexStatus{})
+ if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Logs = append(m.Logs, &VertexLog{})
+ if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Warnings = append(m.Warnings, &VertexWarning{})
+ if err := m.Warnings[len(m.Warnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -3721,7 +7623,7 @@ func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *UsageRecord) Unmarshal(dAtA []byte) error {
+func (m *Vertex) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -3744,15 +7646,15 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group")
+ return fmt.Errorf("proto: Vertex: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -3780,13 +7682,13 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ID = string(dAtA[iNdEx:postIndex])
+ m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType)
}
- var v int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -3796,54 +7698,27 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.Mutable = bool(v != 0)
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
}
- m.InUse = bool(v != 0)
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
}
- m.Size_ = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Size_ |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
}
- case 5:
+ m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -3871,13 +7746,13 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Parent = string(dAtA[iNdEx:postIndex])
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType)
}
- var msglen int
+ var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -3887,28 +7762,15 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
+ m.Cached = bool(v != 0)
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -3935,37 +7797,18 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.LastUsedAt == nil {
- m.LastUsedAt = new(time.Time)
- }
- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType)
- }
- m.UsageCount = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.UsageCount |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ if m.Started == nil {
+ m.Started = new(time.Time)
}
- case 9:
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -3975,27 +7818,31 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Description = string(dAtA[iNdEx:postIndex])
+ if m.Completed == nil {
+ m.Completed = new(time.Time)
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 10:
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RecordType", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4023,33 +7870,13 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.RecordType = string(dAtA[iNdEx:postIndex])
+ m.Error = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Shared = bool(v != 0)
- case 12:
+ case 8:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Parents", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressGroup", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -4059,23 +7886,27 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Parents = append(m.Parents, string(dAtA[iNdEx:postIndex]))
+ if m.ProgressGroup == nil {
+ m.ProgressGroup = &pb.ProgressGroup{}
+ }
+ if err := m.ProgressGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4099,7 +7930,7 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *SolveRequest) Unmarshal(dAtA []byte) error {
+func (m *VertexStatus) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4122,15 +7953,15 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4158,47 +7989,11 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Ref = string(dAtA[iNdEx:postIndex])
+ m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Definition == nil {
- m.Definition = &pb.Definition{}
- }
- if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4216,148 +8011,21 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
}
}
intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Exporter = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.ExporterAttrs == nil {
- m.ExporterAttrs = make(map[string]string)
- }
- var mapkey string
- var mapvalue string
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
}
- m.ExporterAttrs[mapkey] = mapvalue
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 5:
+ case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4385,13 +8053,51 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Session = string(dAtA[iNdEx:postIndex])
+ m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType)
+ }
+ m.Current = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Current |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+ }
+ m.Total = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Total |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -4401,27 +8107,28 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Frontend = string(dAtA[iNdEx:postIndex])
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4448,109 +8155,105 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.FrontendAttrs == nil {
- m.FrontendAttrs = make(map[string]string)
+ if m.Started == nil {
+ m.Started = new(time.Time)
}
- var mapkey string
- var mapvalue string
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
}
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
}
}
- m.FrontendAttrs[mapkey] = mapvalue
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Completed == nil {
+ m.Completed = new(time.Time)
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 8:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *VertexLog) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: VertexLog: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType)
}
- var msglen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -4560,30 +8263,29 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 9:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Entitlements", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -4593,29 +8295,49 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Entitlements = append(m.Entitlements, github_com_moby_buildkit_util_entitlements.Entitlement(dAtA[iNdEx:postIndex]))
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 10:
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+ }
+ m.Stream = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Stream |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType)
}
- var msglen int
+ var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -4625,120 +8347,25 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ if byteLen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.FrontendInputs == nil {
- m.FrontendInputs = make(map[string]*pb.Definition)
- }
- var mapkey string
- var mapvalue *pb.Definition
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var mapmsglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- mapmsglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if mapmsglen < 0 {
- return ErrInvalidLengthControl
- }
- postmsgIndex := iNdEx + mapmsglen
- if postmsgIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postmsgIndex > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = &pb.Definition{}
- if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
- return err
- }
- iNdEx = postmsgIndex
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+ m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...)
+ if m.Msg == nil {
+ m.Msg = []byte{}
}
- m.FrontendInputs[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -4762,7 +8389,7 @@ func (m *SolveRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *CacheOptions) Unmarshal(dAtA []byte) error {
+func (m *VertexWarning) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -4785,17 +8412,102 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group")
+ return fmt.Errorf("proto: VertexWarning: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: VertexWarning: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExportRefDeprecated", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+ }
+ m.Level = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Level |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Short", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Short = append(m.Short[:0], dAtA[iNdEx:postIndex]...)
+ if m.Short == nil {
+ m.Short = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType)
}
- var stringLen uint64
+ var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -4805,27 +8517,27 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if byteLen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ExportRefDeprecated = string(dAtA[iNdEx:postIndex])
+ m.Detail = append(m.Detail, make([]byte, postIndex-iNdEx))
+ copy(m.Detail[len(m.Detail)-1], dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 2:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ImportRefsDeprecated", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -4853,11 +8565,11 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ImportRefsDeprecated = append(m.ImportRefsDeprecated, string(dAtA[iNdEx:postIndex]))
+ m.Url = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 3:
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExportAttrsDeprecated", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -4884,107 +8596,16 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ExportAttrsDeprecated == nil {
- m.ExportAttrsDeprecated = make(map[string]string)
+ if m.Info == nil {
+ m.Info = &pb.SourceInfo{}
}
- var mapkey string
- var mapvalue string
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+ if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.ExportAttrsDeprecated[mapkey] = mapvalue
iNdEx = postIndex
- case 4:
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5011,16 +8632,67 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Exports = append(m.Exports, &CacheOptionsEntry{})
- if err := m.Exports[len(m.Exports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ m.Ranges = append(m.Ranges, &pb.Range{})
+ if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 5:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *BytesMessage) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Imports", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
- var msglen int
+ var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -5030,24 +8702,24 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ if byteLen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + msglen
+ postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Imports = append(m.Imports, &CacheOptionsEntry{})
- if err := m.Imports[len(m.Imports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
}
iNdEx = postIndex
default:
@@ -5072,7 +8744,7 @@ func (m *CacheOptions) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
+func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5095,15 +8767,15 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group")
+ return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5131,11 +8803,62 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Type = string(dAtA[iNdEx:postIndex])
+ m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
- case 2:
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5162,103 +8885,10 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Attrs == nil {
- m.Attrs = make(map[string]string)
- }
- var mapkey string
- var mapvalue string
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+ m.Record = append(m.Record, &types.WorkerRecord{})
+ if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.Attrs[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5282,7 +8912,7 @@ func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *SolveResponse) Unmarshal(dAtA []byte) error {
+func (m *InfoRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5305,15 +8935,66 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group")
+ return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InfoResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field BuildkitVersion", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5340,103 +9021,12 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ExporterResponse == nil {
- m.ExporterResponse = make(map[string]string)
+ if m.BuildkitVersion == nil {
+ m.BuildkitVersion = &types.BuildkitVersion{}
}
- var mapkey string
- var mapvalue string
- for iNdEx < postIndex {
- entryPreIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- if fieldNum == 1 {
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- } else if fieldNum == 2 {
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue < 0 {
- return ErrInvalidLengthControl
- }
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- } else {
- iNdEx = entryPreIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > postIndex {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
+ if err := m.BuildkitVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.ExporterResponse[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -5460,7 +9050,7 @@ func (m *SolveResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *StatusRequest) Unmarshal(dAtA []byte) error {
+func (m *BuildHistoryRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5483,13 +9073,33 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: BuildHistoryRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: BuildHistoryRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ActiveOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ActiveOnly = bool(v != 0)
+ case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
@@ -5521,6 +9131,26 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error {
}
m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field EarlyExit", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.EarlyExit = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:])
@@ -5543,7 +9173,7 @@ func (m *StatusRequest) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *StatusResponse) Unmarshal(dAtA []byte) error {
+func (m *BuildHistoryEvent) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5566,17 +9196,17 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
+ return fmt.Errorf("proto: BuildHistoryEvent: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: BuildHistoryEvent: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType)
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
- var msglen int
+ m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -5586,63 +9216,14 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ m.Type |= BuildHistoryEventType(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Vertexes = append(m.Vertexes, &Vertex{})
- if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Statuses = append(m.Statuses, &VertexStatus{})
- if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5669,42 +9250,10 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Logs = append(m.Logs, &VertexLog{})
- if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ if m.Record == nil {
+ m.Record = &BuildHistoryRecord{}
}
- m.Warnings = append(m.Warnings, &VertexWarning{})
- if err := m.Warnings[len(m.Warnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
@@ -5730,7 +9279,7 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *Vertex) Unmarshal(dAtA []byte) error {
+func (m *BuildHistoryRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -5753,15 +9302,15 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: Vertex: wiretype end group for non-group")
+ return fmt.Errorf("proto: BuildHistoryRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: BuildHistoryRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5789,11 +9338,11 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+ m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -5821,13 +9370,13 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]))
+ m.Frontend = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -5837,47 +9386,122 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType)
+ if m.FrontendAttrs == nil {
+ m.FrontendAttrs = make(map[string]string)
}
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
}
- m.Cached = bool(v != 0)
- case 5:
+ m.FrontendAttrs[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 4:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Exporters", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5904,16 +9528,14 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Started == nil {
- m.Started = new(time.Time)
- }
- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil {
+ m.Exporters = append(m.Exporters, &Exporter{})
+ if err := m.Exporters[len(m.Exporters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 6:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -5940,18 +9562,18 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Completed == nil {
- m.Completed = new(time.Time)
+ if m.Error == nil {
+ m.Error = &rpc.Status{}
}
- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 7:
+ case 6:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -5961,27 +9583,31 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Error = string(dAtA[iNdEx:postIndex])
+ if m.CreatedAt == nil {
+ m.CreatedAt = new(time.Time)
+ }
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
iNdEx = postIndex
- case 8:
+ case 7:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProgressGroup", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6008,69 +9634,18 @@ func (m *Vertex) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.ProgressGroup == nil {
- m.ProgressGroup = &pb.ProgressGroup{}
+ if m.CompletedAt == nil {
+ m.CompletedAt = new(time.Time)
}
- if err := m.ProgressGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.CompletedAt, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *VertexStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 8:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6080,61 +9655,33 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.ID = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthControl
+ if m.Logs == nil {
+ m.Logs = &Descriptor{}
}
- if postIndex > l {
- return io.ErrUnexpectedEOF
+ if err := m.Logs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
- m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 3:
+ case 9:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType)
}
- var stringLen uint64
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6144,65 +9691,122 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + intStringLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType)
- }
- m.Current = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Current |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+ if m.ExporterResponse == nil {
+ m.ExporterResponse = make(map[string]string)
}
- m.Total = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
}
- b := dAtA[iNdEx]
- iNdEx++
- m.Total |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
}
}
- case 6:
+ m.ExporterResponse[mapkey] = mapvalue
+ iNdEx = postIndex
+ case 10:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6229,13 +9833,16 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+ if m.Result == nil {
+ m.Result = &BuildResultInfo{}
+ }
+ if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- case 7:
+ case 11:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Results", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6262,16 +9869,128 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Started == nil {
- m.Started = new(time.Time)
+ if m.Results == nil {
+ m.Results = make(map[string]*BuildResultInfo)
}
- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil {
- return err
+ var mapkey string
+ var mapvalue *BuildResultInfo
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var mapmsglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ mapmsglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if mapmsglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postmsgIndex := iNdEx + mapmsglen
+ if postmsgIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postmsgIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = &BuildResultInfo{}
+ if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
+ return err
+ }
+ iNdEx = postmsgIndex
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
+ m.Results[mapkey] = mapvalue
iNdEx = postIndex
- case 8:
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType)
+ }
+ m.Generation = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Generation |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 13:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Trace", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6298,69 +10017,18 @@ func (m *VertexStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- if m.Completed == nil {
- m.Completed = new(time.Time)
+ if m.Trace == nil {
+ m.Trace = &Descriptor{}
}
- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil {
+ if err := m.Trace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *VertexLog) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: VertexLog: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType)
+ case 14:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Pinned", wireType)
}
- var stringLen uint64
+ var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6370,29 +10038,17 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- stringLen |= uint64(b&0x7F) << shift
+ v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ m.Pinned = bool(v != 0)
+ case 15:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumCachedSteps", wireType)
}
- var msglen int
+ m.NumCachedSteps = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6402,30 +10058,16 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ m.NumCachedSteps |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
+ case 16:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field NumTotalSteps", wireType)
}
- m.Stream = 0
+ m.NumTotalSteps = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6435,16 +10077,16 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Stream |= int64(b&0x7F) << shift
+ m.NumTotalSteps |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType)
- }
- var byteLen int
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NumCompletedSteps", wireType)
+ }
+ m.NumCompletedSteps = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6454,26 +10096,11 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= int(b&0x7F) << shift
+ m.NumCompletedSteps |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...)
- if m.Msg == nil {
- m.Msg = []byte{}
- }
- iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipControl(dAtA[iNdEx:])
@@ -6496,7 +10123,7 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *VertexWarning) Unmarshal(dAtA []byte) error {
+func (m *UpdateBuildHistoryRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6519,15 +10146,15 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: VertexWarning: wiretype end group for non-group")
+ return fmt.Errorf("proto: UpdateBuildHistoryRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: VertexWarning: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: UpdateBuildHistoryRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6555,13 +10182,13 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+ m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Pinned", wireType)
}
- m.Level = 0
+ var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6571,16 +10198,17 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Level |= int64(b&0x7F) << shift
+ v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
+ m.Pinned = bool(v != 0)
case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Short", wireType)
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Delete", wireType)
}
- var byteLen int
+ var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6590,31 +10218,119 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= int(b&0x7F) << shift
+ v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
+ m.Delete = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UpdateBuildHistoryResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UpdateBuildHistoryResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UpdateBuildHistoryResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthControl
}
- if postIndex > l {
+ if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.Short = append(m.Short[:0], dAtA[iNdEx:postIndex]...)
- if m.Short == nil {
- m.Short = []byte{}
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Descriptor) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
}
- iNdEx = postIndex
- case 4:
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Descriptor: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
}
- var byteLen int
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6624,27 +10340,27 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= int(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + byteLen
+ postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Detail = append(m.Detail, make([]byte, postIndex-iNdEx))
- copy(m.Detail[len(m.Detail)-1], dAtA[iNdEx:postIndex])
+ m.MediaType = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 5:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6672,13 +10388,13 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Url = string(dAtA[iNdEx:postIndex])
+ m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
}
- var msglen int
+ m.Size_ = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6688,31 +10404,14 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ m.Size_ |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Info == nil {
- m.Info = &pb.SourceInfo{}
- }
- if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
+ case 5:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6729,20 +10428,113 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
break
}
}
- if msglen < 0 {
- return ErrInvalidLengthControl
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthControl
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Ranges = append(m.Ranges, &pb.Range{})
- if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Annotations == nil {
+ m.Annotations = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+ m.Annotations[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
@@ -6766,7 +10558,7 @@ func (m *VertexWarning) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *BytesMessage) Unmarshal(dAtA []byte) error {
+func (m *BuildResultInfo) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6789,17 +10581,17 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group")
+ return fmt.Errorf("proto: BuildResultInfo: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: BuildResultInfo: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
}
- var byteLen int
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowControl
@@ -6809,24 +10601,60 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- byteLen |= int(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if byteLen < 0 {
+ if msglen < 0 {
return ErrInvalidLengthControl
}
- postIndex := iNdEx + byteLen
+ postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthControl
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
+ if m.Result == nil {
+ m.Result = &Descriptor{}
+ }
+ if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Attestations", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthControl
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Attestations = append(m.Attestations, &Descriptor{})
+ if err := m.Attestations[len(m.Attestations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
}
iNdEx = postIndex
default:
@@ -6851,7 +10679,7 @@ func (m *BytesMessage) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error {
+func (m *Exporter) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -6874,15 +10702,15 @@ func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: Exporter: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: Exporter: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
@@ -6910,62 +10738,11 @@ func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex]))
+ m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipControl(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthControl
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowControl
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -6992,10 +10769,103 @@ func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Record = append(m.Record, &types.WorkerRecord{})
- if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ if m.Attrs == nil {
+ m.Attrs = make(map[string]string)
+ }
+ var mapkey string
+ var mapvalue string
+ for iNdEx < postIndex {
+ entryPreIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ if fieldNum == 1 {
+ var stringLenmapkey uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapkey |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapkey := int(stringLenmapkey)
+ if intStringLenmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapkey := iNdEx + intStringLenmapkey
+ if postStringIndexmapkey < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapkey > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+ iNdEx = postStringIndexmapkey
+ } else if fieldNum == 2 {
+ var stringLenmapvalue uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowControl
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLenmapvalue |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLenmapvalue := int(stringLenmapvalue)
+ if intStringLenmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+ if postStringIndexmapvalue < 0 {
+ return ErrInvalidLengthControl
+ }
+ if postStringIndexmapvalue > l {
+ return io.ErrUnexpectedEOF
+ }
+ mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+ iNdEx = postStringIndexmapvalue
+ } else {
+ iNdEx = entryPreIndex
+ skippy, err := skipControl(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthControl
+ }
+ if (iNdEx + skippy) > postIndex {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
+ m.Attrs[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
diff --git a/api/services/control/control.proto b/api/services/control/control.proto
index a468a293af88..327c9eeaf420 100644
--- a/api/services/control/control.proto
+++ b/api/services/control/control.proto
@@ -6,6 +6,9 @@ import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
import "github.com/moby/buildkit/api/types/worker.proto";
+// import "github.com/containerd/containerd/api/types/descriptor.proto";
+import "github.com/gogo/googleapis/google/rpc/status.proto";
+import "github.com/moby/buildkit/sourcepolicy/pb/policy.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
@@ -18,7 +21,10 @@ service Control {
rpc Status(StatusRequest) returns (stream StatusResponse);
rpc Session(stream BytesMessage) returns (stream BytesMessage);
rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse);
- // rpc Info(InfoRequest) returns (InfoResponse);
+ rpc Info(InfoRequest) returns (InfoResponse);
+
+ rpc ListenBuildHistory(BuildHistoryRequest) returns (stream BuildHistoryEvent);
+ rpc UpdateBuildHistory(UpdateBuildHistoryRequest) returns (UpdateBuildHistoryResponse);
}
message PruneRequest {
@@ -62,6 +68,8 @@ message SolveRequest {
CacheOptions Cache = 8 [(gogoproto.nullable) = false];
repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ];
  map<string, pb.Definition> FrontendInputs = 10;
+ bool Internal = 11; // Internal builds are not recorded in build history
+ moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 12;
}
message CacheOptions {
@@ -157,3 +165,73 @@ message ListWorkersRequest {
message ListWorkersResponse {
repeated moby.buildkit.v1.types.WorkerRecord record = 1;
}
+
+message InfoRequest {}
+
+message InfoResponse {
+ moby.buildkit.v1.types.BuildkitVersion buildkitVersion = 1;
+}
+
+message BuildHistoryRequest {
+ bool ActiveOnly = 1;
+ string Ref = 2;
+ bool EarlyExit = 3;
+}
+
+enum BuildHistoryEventType {
+ STARTED = 0;
+ COMPLETE = 1;
+ DELETED = 2;
+}
+
+message BuildHistoryEvent {
+ BuildHistoryEventType type = 1;
+ BuildHistoryRecord record = 2;
+}
+
+message BuildHistoryRecord {
+ string Ref = 1;
+ string Frontend = 2;
+  map<string, string> FrontendAttrs = 3;
+ repeated Exporter Exporters = 4;
+ google.rpc.Status error = 5;
+ google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true];
+ google.protobuf.Timestamp CompletedAt = 7 [(gogoproto.stdtime) = true];
+ Descriptor logs = 8;
+  map<string, string> ExporterResponse = 9;
+ BuildResultInfo Result = 10;
+  map<string, BuildResultInfo> Results = 11;
+ int32 Generation = 12;
+ Descriptor trace = 13;
+ bool pinned = 14;
+ int32 numCachedSteps = 15;
+ int32 numTotalSteps = 16;
+ int32 numCompletedSteps = 17;
+ // TODO: tags
+ // TODO: unclipped logs
+}
+
+message UpdateBuildHistoryRequest {
+ string Ref = 1;
+ bool Pinned = 2;
+ bool Delete = 3;
+}
+
+message UpdateBuildHistoryResponse {}
+
+message Descriptor {
+ string media_type = 1;
+ string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+ int64 size = 3;
+  map<string, string> annotations = 5;
+}
+
+message BuildResultInfo {
+ Descriptor Result = 1;
+ repeated Descriptor Attestations = 2;
+}
+
+message Exporter {
+ string Type = 1;
+  map<string, string> Attrs = 2;
+}
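
The new Control RPCs added above (`Info`, `ListenBuildHistory`, `UpdateBuildHistory`) are served over the same gRPC interface as the existing ones, so the generated client in `api/services/control` can drive them directly. The following is a minimal illustrative sketch, not part of this patch: the socket address, dial options, and error handling are assumptions, and only fields defined in the messages above are used.

```go
package main

import (
	"context"
	"fmt"
	"log"

	controlapi "github.com/moby/buildkit/api/services/control"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumed buildkitd endpoint; adjust for your deployment.
	conn, err := grpc.Dial("unix:///run/buildkit/buildkitd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	c := controlapi.NewControlClient(conn)

	// Stream history records; EarlyExit returns the current records and then
	// closes the stream instead of waiting for new events.
	stream, err := c.ListenBuildHistory(context.Background(),
		&controlapi.BuildHistoryRequest{EarlyExit: true})
	if err != nil {
		log.Fatal(err)
	}
	for {
		ev, err := stream.Recv()
		if err != nil {
			break // io.EOF once the server has sent everything
		}
		fmt.Println(ev.Type, ev.Record.GetRef())
	}

	// Mark a record as pinned ("some-ref" is a placeholder ref).
	if _, err := c.UpdateBuildHistory(context.Background(),
		&controlapi.UpdateBuildHistoryRequest{Ref: "some-ref", Pinned: true}); err != nil {
		log.Fatal(err)
	}
}
```
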
diff --git a/api/types/worker.pb.go b/api/types/worker.pb.go
index 54cbd605e14c..e1b3928cba52 100644
--- a/api/types/worker.pb.go
+++ b/api/types/worker.pb.go
@@ -29,6 +29,7 @@ type WorkerRecord struct {
Labels map[string]string `protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms"`
GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"`
+ BuildkitVersion *BuildkitVersion `protobuf:"bytes,5,opt,name=BuildkitVersion,proto3" json:"BuildkitVersion,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
@@ -95,6 +96,13 @@ func (m *WorkerRecord) GetGCPolicy() []*GCPolicy {
return nil
}
+func (m *WorkerRecord) GetBuildkitVersion() *BuildkitVersion {
+ if m != nil {
+ return m.BuildkitVersion
+ }
+ return nil
+}
+
type GCPolicy struct {
All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"`
KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"`
@@ -166,39 +174,106 @@ func (m *GCPolicy) GetFilters() []string {
return nil
}
+type BuildkitVersion struct {
+ Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *BuildkitVersion) Reset() { *m = BuildkitVersion{} }
+func (m *BuildkitVersion) String() string { return proto.CompactTextString(m) }
+func (*BuildkitVersion) ProtoMessage() {}
+func (*BuildkitVersion) Descriptor() ([]byte, []int) {
+ return fileDescriptor_e4ff6184b07e587a, []int{2}
+}
+func (m *BuildkitVersion) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BuildkitVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BuildkitVersion.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *BuildkitVersion) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BuildkitVersion.Merge(m, src)
+}
+func (m *BuildkitVersion) XXX_Size() int {
+ return m.Size()
+}
+func (m *BuildkitVersion) XXX_DiscardUnknown() {
+ xxx_messageInfo_BuildkitVersion.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BuildkitVersion proto.InternalMessageInfo
+
+func (m *BuildkitVersion) GetPackage() string {
+ if m != nil {
+ return m.Package
+ }
+ return ""
+}
+
+func (m *BuildkitVersion) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *BuildkitVersion) GetRevision() string {
+ if m != nil {
+ return m.Revision
+ }
+ return ""
+}
+
func init() {
proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord")
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.types.WorkerRecord.LabelsEntry")
proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy")
+ proto.RegisterType((*BuildkitVersion)(nil), "moby.buildkit.v1.types.BuildkitVersion")
}
func init() { proto.RegisterFile("worker.proto", fileDescriptor_e4ff6184b07e587a) }
var fileDescriptor_e4ff6184b07e587a = []byte{
- // 355 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40,
- 0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15,
- 0x0b, 0x9d, 0xa2, 0x6e, 0xd4, 0xb8, 0x42, 0x8c, 0x92, 0xb8, 0x20, 0xb3, 0x71, 0xdd, 0x81, 0x01,
- 0x9b, 0x0e, 0x9c, 0xc9, 0x74, 0x8a, 0xf6, 0x39, 0x7c, 0x29, 0x96, 0x3e, 0x81, 0x31, 0x3c, 0x89,
- 0x99, 0x29, 0x08, 0x26, 0xba, 0x3b, 0xff, 0x9f, 0xff, 0xfb, 0xe7, 0x9c, 0x0c, 0x0a, 0x9e, 0x41,
- 0xa5, 0x5c, 0x11, 0xa9, 0x40, 0x03, 0x3e, 0x98, 0x01, 0x2b, 0x08, 0xcb, 0x13, 0x31, 0x4e, 0x13,
- 0x4d, 0x16, 0xa7, 0x44, 0x17, 0x92, 0x67, 0x8d, 0x93, 0x69, 0xa2, 0x9f, 0x72, 0x46, 0x46, 0x30,
- 0x8b, 0xa6, 0x30, 0x85, 0xc8, 0xc6, 0x59, 0x3e, 0xb1, 0xca, 0x0a, 0x3b, 0x95, 0x35, 0x8d, 0xe3,
- 0x9d, 0xb8, 0x69, 0x8c, 0x36, 0x8d, 0x51, 0x06, 0x62, 0xc1, 0x55, 0x24, 0x59, 0x04, 0x32, 0x2b,
- 0xd3, 0xed, 0x57, 0x17, 0x05, 0x8f, 0x76, 0x0b, 0xca, 0x47, 0xa0, 0xc6, 0xf8, 0x3f, 0x72, 0x07,
- 0xfd, 0xd0, 0x69, 0x39, 0x1d, 0x9f, 0xba, 0x83, 0x3e, 0xbe, 0x47, 0xd5, 0x87, 0x98, 0x71, 0x91,
- 0x85, 0x6e, 0xcb, 0xeb, 0xd4, 0xcf, 0xba, 0xe4, 0xe7, 0x35, 0xc9, 0x6e, 0x0b, 0x29, 0x91, 0xdb,
- 0xb9, 0x56, 0x05, 0x5d, 0xf3, 0xb8, 0x8b, 0x7c, 0x29, 0x62, 0x3d, 0x01, 0x35, 0xcb, 0x42, 0xcf,
- 0x96, 0x05, 0x44, 0x32, 0x32, 0x5c, 0x9b, 0xbd, 0xca, 0xf2, 0xfd, 0xe8, 0x0f, 0xdd, 0x86, 0xf0,
- 0x35, 0xaa, 0xdd, 0xdd, 0x0c, 0x41, 0x24, 0xa3, 0x22, 0xac, 0x58, 0xa0, 0xf5, 0xdb, 0xeb, 0x9b,
- 0x1c, 0xfd, 0x22, 0x1a, 0x97, 0xa8, 0xbe, 0xb3, 0x06, 0xde, 0x43, 0x5e, 0xca, 0x8b, 0xf5, 0x65,
- 0x66, 0xc4, 0xfb, 0xe8, 0xef, 0x22, 0x16, 0x39, 0x0f, 0x5d, 0xeb, 0x95, 0xe2, 0xca, 0xbd, 0x70,
- 0xda, 0x2f, 0xdb, 0x87, 0x0d, 0x17, 0x0b, 0x61, 0xb9, 0x1a, 0x35, 0x23, 0x6e, 0xa3, 0x20, 0xe5,
- 0x5c, 0xf6, 0x73, 0x15, 0xeb, 0x04, 0xe6, 0x16, 0xf7, 0xe8, 0x37, 0x0f, 0x1f, 0x22, 0xdf, 0xe8,
- 0x5e, 0xa1, 0xb9, 0x39, 0xd6, 0x04, 0xb6, 0x06, 0x0e, 0xd1, 0xbf, 0x49, 0x22, 0x34, 0x57, 0x99,
- 0xbd, 0xcb, 0xa7, 0x1b, 0xd9, 0x0b, 0x96, 0xab, 0xa6, 0xf3, 0xb6, 0x6a, 0x3a, 0x1f, 0xab, 0xa6,
- 0xc3, 0xaa, 0xf6, 0x93, 0xce, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x79, 0x52, 0x6a, 0x29,
- 0x02, 0x00, 0x00,
+ // 416 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x52, 0xc1, 0x8e, 0xd3, 0x30,
+ 0x10, 0x25, 0xc9, 0xee, 0xd2, 0xb8, 0x11, 0x20, 0x0b, 0xa1, 0x28, 0x42, 0x25, 0xca, 0x85, 0x1e,
+ 0xc0, 0x59, 0x96, 0x0b, 0x20, 0x4e, 0xa1, 0x08, 0x56, 0xe2, 0xb0, 0xf8, 0x00, 0x67, 0x3b, 0xeb,
+ 0x86, 0x28, 0xee, 0xda, 0x72, 0x9c, 0x40, 0xfe, 0xb0, 0x47, 0xbe, 0x00, 0xa1, 0x1e, 0xf8, 0x0e,
+ 0x64, 0x27, 0x69, 0x4b, 0xd9, 0xde, 0xe6, 0xcd, 0xbc, 0xf7, 0x3c, 0xf3, 0x64, 0x10, 0x7c, 0x17,
+ 0xaa, 0x62, 0x0a, 0x49, 0x25, 0xb4, 0x80, 0x8f, 0x56, 0x82, 0x76, 0x88, 0x36, 0x25, 0xbf, 0xae,
+ 0x4a, 0x8d, 0xda, 0x17, 0x48, 0x77, 0x92, 0xd5, 0xd1, 0xf3, 0xa2, 0xd4, 0xdf, 0x1a, 0x8a, 0x72,
+ 0xb1, 0x4a, 0x0b, 0x51, 0x88, 0xd4, 0xd2, 0x69, 0xb3, 0xb4, 0xc8, 0x02, 0x5b, 0xf5, 0x36, 0xd1,
+ 0xb3, 0x3d, 0xba, 0x71, 0x4c, 0x47, 0xc7, 0xb4, 0x16, 0xbc, 0x65, 0x2a, 0x95, 0x34, 0x15, 0xb2,
+ 0xee, 0xd9, 0xc9, 0x1f, 0x17, 0x04, 0x5f, 0xed, 0x16, 0x98, 0xe5, 0x42, 0x5d, 0xc3, 0x7b, 0xc0,
+ 0xbd, 0x5c, 0x84, 0x4e, 0xec, 0xcc, 0x7d, 0xec, 0x5e, 0x2e, 0xe0, 0x47, 0x70, 0xf6, 0x89, 0x50,
+ 0xc6, 0xeb, 0xd0, 0x8d, 0xbd, 0xf9, 0xf4, 0xe2, 0x1c, 0xdd, 0xbe, 0x26, 0xda, 0x77, 0x41, 0xbd,
+ 0xe4, 0xfd, 0x8d, 0x56, 0x1d, 0x1e, 0xf4, 0xf0, 0x1c, 0xf8, 0x92, 0x13, 0xbd, 0x14, 0x6a, 0x55,
+ 0x87, 0x9e, 0x35, 0x0b, 0x90, 0xa4, 0xe8, 0x6a, 0x68, 0x66, 0x27, 0xeb, 0x5f, 0x4f, 0xee, 0xe0,
+ 0x1d, 0x09, 0xbe, 0x05, 0x93, 0x0f, 0xef, 0xae, 0x04, 0x2f, 0xf3, 0x2e, 0x3c, 0xb1, 0x82, 0xf8,
+ 0xd8, 0xeb, 0x23, 0x0f, 0x6f, 0x15, 0xf0, 0x33, 0xb8, 0x9f, 0x0d, 0xbc, 0x2f, 0x4c, 0xd5, 0xa5,
+ 0xb8, 0x09, 0x4f, 0x63, 0x67, 0x3e, 0xbd, 0x78, 0x7a, 0xcc, 0xe4, 0x80, 0x8e, 0x0f, 0xf5, 0xd1,
+ 0x6b, 0x30, 0xdd, 0xbb, 0x0c, 0x3e, 0x00, 0x5e, 0xc5, 0xba, 0x21, 0x2c, 0x53, 0xc2, 0x87, 0xe0,
+ 0xb4, 0x25, 0xbc, 0x61, 0xa1, 0x6b, 0x7b, 0x3d, 0x78, 0xe3, 0xbe, 0x72, 0x92, 0x1f, 0xbb, 0x5b,
+ 0x8c, 0x8e, 0x70, 0x6e, 0x75, 0x13, 0x6c, 0x4a, 0x98, 0x80, 0xa0, 0x62, 0x4c, 0x2e, 0x1a, 0x45,
+ 0xb4, 0x59, 0xd4, 0xc8, 0x3d, 0xfc, 0x4f, 0x0f, 0x3e, 0x06, 0xbe, 0xc1, 0x59, 0xa7, 0x99, 0xc9,
+ 0xcf, 0x10, 0x76, 0x0d, 0x18, 0x82, 0xbb, 0xcb, 0x92, 0x6b, 0xa6, 0x6a, 0x1b, 0x95, 0x8f, 0x47,
+ 0x98, 0x90, 0xff, 0x72, 0x30, 0x64, 0x49, 0xf2, 0x8a, 0x14, 0x6c, 0x58, 0x7e, 0x84, 0x66, 0xd2,
+ 0x0e, 0x61, 0xf5, 0x27, 0x8c, 0x10, 0x46, 0x60, 0xa2, 0x58, 0x5b, 0xda, 0x91, 0x67, 0x47, 0x5b,
+ 0x9c, 0x05, 0xeb, 0xcd, 0xcc, 0xf9, 0xb9, 0x99, 0x39, 0xbf, 0x37, 0x33, 0x87, 0x9e, 0xd9, 0xaf,
+ 0xf5, 0xf2, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x19, 0xcf, 0xd5, 0xdf, 0x02, 0x00, 0x00,
}
func (m *WorkerRecord) Marshal() (dAtA []byte, err error) {
@@ -225,6 +300,18 @@ func (m *WorkerRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i -= len(m.XXX_unrecognized)
copy(dAtA[i:], m.XXX_unrecognized)
}
+ if m.BuildkitVersion != nil {
+ {
+ size, err := m.BuildkitVersion.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintWorker(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
if len(m.GCPolicy) > 0 {
for iNdEx := len(m.GCPolicy) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -338,6 +425,54 @@ func (m *GCPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *BuildkitVersion) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BuildkitVersion) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildkitVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Revision) > 0 {
+ i -= len(m.Revision)
+ copy(dAtA[i:], m.Revision)
+ i = encodeVarintWorker(dAtA, i, uint64(len(m.Revision)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintWorker(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Package) > 0 {
+ i -= len(m.Package)
+ copy(dAtA[i:], m.Package)
+ i = encodeVarintWorker(dAtA, i, uint64(len(m.Package)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarintWorker(dAtA []byte, offset int, v uint64) int {
offset -= sovWorker(v)
base := offset
@@ -379,6 +514,10 @@ func (m *WorkerRecord) Size() (n int) {
n += 1 + l + sovWorker(uint64(l))
}
}
+ if m.BuildkitVersion != nil {
+ l = m.BuildkitVersion.Size()
+ n += 1 + l + sovWorker(uint64(l))
+ }
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
@@ -412,6 +551,30 @@ func (m *GCPolicy) Size() (n int) {
return n
}
+func (m *BuildkitVersion) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Package)
+ if l > 0 {
+ n += 1 + l + sovWorker(uint64(l))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovWorker(uint64(l))
+ }
+ l = len(m.Revision)
+ if l > 0 {
+ n += 1 + l + sovWorker(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
func sovWorker(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -674,6 +837,42 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field BuildkitVersion", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthWorker
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.BuildkitVersion == nil {
+ m.BuildkitVersion = &BuildkitVersion{}
+ }
+ if err := m.BuildkitVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipWorker(dAtA[iNdEx:])
@@ -837,6 +1036,153 @@ func (m *GCPolicy) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *BuildkitVersion) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: BuildkitVersion: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: BuildkitVersion: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Package", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthWorker
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Package = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthWorker
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowWorker
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthWorker
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthWorker
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Revision = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipWorker(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthWorker
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipWorker(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
diff --git a/api/types/worker.proto b/api/types/worker.proto
index 82dd7ad65145..476fcc62e104 100644
--- a/api/types/worker.proto
+++ b/api/types/worker.proto
@@ -14,6 +14,7 @@ message WorkerRecord {
  map<string, string> Labels = 2;
repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
repeated GCPolicy GCPolicy = 4;
+ BuildkitVersion BuildkitVersion = 5;
}
message GCPolicy {
@@ -22,3 +23,9 @@ message GCPolicy {
int64 keepBytes = 3;
repeated string filters = 4;
}
+
+message BuildkitVersion {
+ string package = 1;
+ string version = 2;
+ string revision = 3;
+}
diff --git a/cache/blobs.go b/cache/blobs.go
index 8d2beefd0654..716be9093471 100644
--- a/cache/blobs.go
+++ b/cache/blobs.go
@@ -1,19 +1,15 @@
package cache
import (
- "compress/gzip"
"context"
"fmt"
- "io"
"os"
"strconv"
- "github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/diff/walking"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/mount"
- "github.com/klauspost/compress/zstd"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/flightcontrol"
@@ -40,6 +36,14 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo
if _, ok := leases.FromContext(ctx); !ok {
return errors.Errorf("missing lease requirement for computeBlobChain")
}
+ if !createIfNeeded {
+ sr.mu.Lock()
+ if sr.equalMutable != nil {
+ sr.mu.Unlock()
+ return nil
+ }
+ sr.mu.Unlock()
+ }
if err := sr.Finalize(ctx); err != nil {
return err
@@ -57,8 +61,6 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo
return computeBlobChain(ctx, sr, createIfNeeded, comp, s, filter)
}
-type compressor func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error)
-
func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, comp compression.Config, s session.Group, filter map[string]struct{}) error {
eg, ctx := errgroup.WithContext(ctx)
switch sr.kind() {
@@ -92,28 +94,8 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
return nil, errors.WithStack(ErrNoBlobs)
}
- var mediaType string
- var compressorFunc compressor
- var finalize func(context.Context, content.Store) (map[string]string, error)
- switch comp.Type {
- case compression.Uncompressed:
- mediaType = ocispecs.MediaTypeImageLayer
- case compression.Gzip:
- compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) {
- return gzipWriter(comp)(dest)
- }
- mediaType = ocispecs.MediaTypeImageLayerGzip
- case compression.EStargz:
- compressorFunc, finalize = compressEStargz(comp)
- mediaType = ocispecs.MediaTypeImageLayerGzip
- case compression.Zstd:
- compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) {
- return zstdWriter(comp)(dest)
- }
- mediaType = ocispecs.MediaTypeImageLayer + "+zstd"
- default:
- return nil, errors.Errorf("unknown layer compression type: %q", comp.Type)
- }
+ compressorFunc, finalize := comp.Type.Compress(ctx, comp)
+ mediaType := comp.Type.MediaType()
var lowerRef *immutableRef
switch sr.kind() {
@@ -206,7 +188,7 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool
}
}
- if desc.Digest == "" && !isTypeWindows(sr) && (comp.Type == compression.Zstd || comp.Type == compression.EStargz) {
+ if desc.Digest == "" && !isTypeWindows(sr) && comp.Type.NeedsComputeDiffBySelf() {
// These compression types aren't supported by containerd differ. So try to compute diff on buildkit side.
 		// This case can happen on containerd worker + non-overlayfs snapshotter (e.g. native).
// See also: https://github.com/containerd/containerd/issues/4263
@@ -433,7 +415,7 @@ func isTypeWindows(sr *immutableRef) bool {
// ensureCompression ensures the specified ref has the blob of the specified compression Type.
func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error {
- _, err := g.Do(ctx, fmt.Sprintf("%s-%d", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) {
+ _, err := g.Do(ctx, fmt.Sprintf("%s-%s", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) {
desc, err := ref.ociDesc(ctx, ref.descHandlers, true)
if err != nil {
return nil, err
@@ -480,38 +462,3 @@ func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.
})
return err
}
-
-func gzipWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) {
- return func(dest io.Writer) (io.WriteCloser, error) {
- level := gzip.DefaultCompression
- if comp.Level != nil {
- level = *comp.Level
- }
- return gzip.NewWriterLevel(dest, level)
- }
-}
-
-func zstdWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) {
- return func(dest io.Writer) (io.WriteCloser, error) {
- level := zstd.SpeedDefault
- if comp.Level != nil {
- level = toZstdEncoderLevel(*comp.Level)
- }
- return zstd.NewWriter(dest, zstd.WithEncoderLevel(level))
- }
-}
-
-func toZstdEncoderLevel(level int) zstd.EncoderLevel {
- // map zstd compression levels to go-zstd levels
- // once we also have c based implementation move this to helper pkg
- if level < 0 {
- return zstd.SpeedDefault
- } else if level < 3 {
- return zstd.SpeedFastest
- } else if level < 7 {
- return zstd.SpeedDefault
- } else if level < 9 {
- return zstd.SpeedBetterCompression
- }
- return zstd.SpeedBestCompression
-}
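
With this change, call sites no longer switch over compression types themselves; compressor and media-type selection moves behind `comp.Type` (`Compress(ctx, comp)` and `MediaType()` above, plus `NeedsComputeDiffBySelf()` in the diff-computation path). As a rough, self-contained model of that design, and not the actual `util/compression` API, a type-driven dispatch might look like this:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
)

// Compressor mirrors the shape of the local typedef removed above: it wraps a
// destination writer for a given media type.
type Compressor func(dest io.Writer, mediaType string) (io.WriteCloser, error)

// Type is a toy stand-in for a compression type that knows its own media type
// and how to construct a compressing writer.
type Type interface {
	MediaType() string
	Compress(level *int) Compressor
}

type gzipType struct{}

func (gzipType) MediaType() string { return "application/vnd.oci.image.layer.v1.tar+gzip" }

func (gzipType) Compress(level *int) Compressor {
	return func(dest io.Writer, _ string) (io.WriteCloser, error) {
		l := gzip.DefaultCompression
		if level != nil {
			l = *level
		}
		return gzip.NewWriterLevel(dest, l)
	}
}

func main() {
	var t Type = gzipType{} // call sites only see the interface
	w, err := t.Compress(nil)(os.Stdout, t.MediaType())
	if err != nil {
		panic(err)
	}
	fmt.Fprintln(w, "payload") // compressed bytes are written to stdout
	if err := w.Close(); err != nil { // gzip finalizes its trailer on Close
		panic(err)
	}
}
```
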
diff --git a/cache/blobs_linux.go b/cache/blobs_linux.go
index fcb8850a02a9..ce41275e6b74 100644
--- a/cache/blobs_linux.go
+++ b/cache/blobs_linux.go
@@ -12,6 +12,7 @@ import (
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/mount"
"github.com/moby/buildkit/util/bklog"
+ "github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/overlay"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -24,7 +25,7 @@ var emptyDesc = ocispecs.Descriptor{}
// diff between lower and upper snapshot. If the passed mounts cannot
// be computed (e.g. because the mounts aren't overlayfs), it returns
// an error.
-func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) {
+func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) {
// Get upperdir location if mounts are overlayfs that can be processed by this differ.
upperdir, err := overlay.GetUpperdir(lower, upper)
if err != nil {
@@ -57,11 +58,14 @@ func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper
if err != nil {
return emptyDesc, false, errors.Wrap(err, "failed to get compressed stream")
}
- err = overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower)
- compressed.Close()
- if err != nil {
+	// Close ensures compressorFunc performs any finalization work.
+ defer compressed.Close()
+ if err := overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower); err != nil {
return emptyDesc, false, errors.Wrap(err, "failed to write compressed diff")
}
+ if err := compressed.Close(); err != nil {
+ return emptyDesc, false, errors.Wrap(err, "failed to close compressed diff writer")
+ }
if labels == nil {
labels = map[string]string{}
}
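
The rewritten error handling above both defers `compressed.Close()` and calls it explicitly: the deferred call cleans up on the error path, while the explicit call surfaces finalization errors, since compressors generally write their trailer only on Close. A generic sketch of that pattern, independent of buildkit's types:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// writeCompressed shows the close-twice pattern: defer Close as a safety net
// for early returns, and also Close explicitly so trailer/flush errors are
// reported. A second Close on an already-closed gzip.Writer is a no-op.
func writeCompressed(dst io.Writer, payload []byte) error {
	zw := gzip.NewWriter(dst)
	defer zw.Close() // cleanup if we return early

	if _, err := zw.Write(payload); err != nil {
		return fmt.Errorf("write compressed payload: %w", err)
	}
	if err := zw.Close(); err != nil { // surface finalization errors
		return fmt.Errorf("finalize compressed payload: %w", err)
	}
	return nil
}

func main() {
	var buf bytes.Buffer
	if err := writeCompressed(&buf, []byte("hello")); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), "compressed bytes written")
}
```
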
diff --git a/cache/blobs_nolinux.go b/cache/blobs_nolinux.go
index 2ccee770e2a8..1567768c1939 100644
--- a/cache/blobs_nolinux.go
+++ b/cache/blobs_nolinux.go
@@ -6,11 +6,12 @@ package cache
import (
"context"
+ "github.com/moby/buildkit/util/compression"
"github.com/containerd/containerd/mount"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
-func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) {
+func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compression.Compressor) (_ ocispecs.Descriptor, ok bool, err error) {
return ocispecs.Descriptor{}, true, errors.Errorf("overlayfs-based diff computing is unsupported")
}
diff --git a/cache/compression.go b/cache/compression.go
new file mode 100644
index 000000000000..bede8d932278
--- /dev/null
+++ b/cache/compression.go
@@ -0,0 +1,16 @@
+//go:build !nydus
+// +build !nydus
+
+package cache
+
+import (
+ "context"
+
+ "github.com/containerd/containerd/content"
+ "github.com/moby/buildkit/cache/config"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool {
+ return refCfg.Compression.Force
+}
diff --git a/cache/compression_nydus.go b/cache/compression_nydus.go
new file mode 100644
index 000000000000..48b61a4b36ff
--- /dev/null
+++ b/cache/compression_nydus.go
@@ -0,0 +1,147 @@
+//go:build nydus
+// +build nydus
+
+package cache
+
+import (
+ "compress/gzip"
+ "context"
+ "encoding/json"
+ "io"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/errdefs"
+ "github.com/moby/buildkit/cache/config"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/util/compression"
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+
+ nydusify "github.com/containerd/nydus-snapshotter/pkg/converter"
+)
+
+func init() {
+ additionalAnnotations = append(
+ additionalAnnotations,
+ nydusify.LayerAnnotationNydusBlob, nydusify.LayerAnnotationNydusBootstrap, nydusify.LayerAnnotationNydusBlobIDs,
+ )
+}
+
+// Nydus compression can't be mixed with other compression types in the same image,
+// so if `source` is a nydus layer but the target uses a different compression type,
+// we must force compression.
+func needsForceCompression(ctx context.Context, cs content.Store, source ocispecs.Descriptor, refCfg config.RefConfig) bool {
+ if refCfg.Compression.Force {
+ return true
+ }
+ isNydusBlob, _ := compression.Nydus.Is(ctx, cs, source)
+ if refCfg.Compression.Type == compression.Nydus {
+ return !isNydusBlob
+ }
+ return isNydusBlob
+}
+
+// MergeNydus does two steps:
+// 1. Extracts the nydus bootstrap from the nydus format (nydus blob + nydus bootstrap) for each layer.
+// 2. Merges all nydus bootstraps into a final bootstrap (added as an extra layer).
+// The nydus bootstrap size is very small, so the merge operation is fast.
+func MergeNydus(ctx context.Context, ref ImmutableRef, comp compression.Config, s session.Group) (*ocispecs.Descriptor, error) {
+ iref, ok := ref.(*immutableRef)
+ if !ok {
+ return nil, errors.Errorf("unsupported ref type %T", ref)
+ }
+ refs := iref.layerChain()
+ if len(refs) == 0 {
+ return nil, errors.Errorf("refs can't be empty")
+ }
+
+ // Extracts the nydus bootstrap from the nydus format for each layer.
+ var cm *cacheManager
+ layers := []nydusify.Layer{}
+ blobIDs := []string{}
+ for _, ref := range refs {
+ blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, comp, s)
+ if err != nil {
+ return nil, errors.Wrapf(err, "get compression blob %q", comp.Type)
+ }
+ ra, err := ref.cm.ContentStore.ReaderAt(ctx, blobDesc)
+ if err != nil {
+ return nil, errors.Wrapf(err, "get reader for compression blob %q", comp.Type)
+ }
+ defer ra.Close()
+ if cm == nil {
+ cm = ref.cm
+ }
+ blobIDs = append(blobIDs, blobDesc.Digest.Hex())
+ layers = append(layers, nydusify.Layer{
+ Digest: blobDesc.Digest,
+ ReaderAt: ra,
+ })
+ }
+
+ // Merge all nydus bootstraps into a final nydus bootstrap.
+ pr, pw := io.Pipe()
+ go func() {
+ defer pw.Close()
+ if _, err := nydusify.Merge(ctx, layers, pw, nydusify.MergeOption{
+ WithTar: true,
+ }); err != nil {
+ pw.CloseWithError(errors.Wrapf(err, "merge nydus bootstrap"))
+ }
+ }()
+
+ // Compress the final nydus bootstrap to tar.gz and write it into the content store.
+ cw, err := content.OpenWriter(ctx, cm.ContentStore, content.WithRef("nydus-merge-"+iref.getChainID().String()))
+ if err != nil {
+ return nil, errors.Wrap(err, "open content store writer")
+ }
+ defer cw.Close()
+
+ gw := gzip.NewWriter(cw)
+ uncompressedDgst := digest.SHA256.Digester()
+ compressed := io.MultiWriter(gw, uncompressedDgst.Hash())
+ if _, err := io.Copy(compressed, pr); err != nil {
+ return nil, errors.Wrapf(err, "copy bootstrap targz into content store")
+ }
+ if err := gw.Close(); err != nil {
+ return nil, errors.Wrap(err, "close gzip writer")
+ }
+
+ compressedDgst := cw.Digest()
+ if err := cw.Commit(ctx, 0, compressedDgst, content.WithLabels(map[string]string{
+ containerdUncompressed: uncompressedDgst.Digest().String(),
+ })); err != nil {
+ if !errdefs.IsAlreadyExists(err) {
+ return nil, errors.Wrap(err, "commit to content store")
+ }
+ }
+ if err := cw.Close(); err != nil {
+ return nil, errors.Wrap(err, "close content store writer")
+ }
+
+ info, err := cm.ContentStore.Info(ctx, compressedDgst)
+ if err != nil {
+ return nil, errors.Wrap(err, "get info from content store")
+ }
+
+ blobIDsBytes, err := json.Marshal(blobIDs)
+ if err != nil {
+ return nil, errors.Wrap(err, "marshal blob ids")
+ }
+
+ desc := ocispecs.Descriptor{
+ Digest: compressedDgst,
+ Size: info.Size,
+ MediaType: ocispecs.MediaTypeImageLayerGzip,
+ Annotations: map[string]string{
+ containerdUncompressed: uncompressedDgst.Digest().String(),
+ // Use this annotation to identify the nydus bootstrap layer.
+ nydusify.LayerAnnotationNydusBootstrap: "true",
+ // Track all blob digests for nydus snapshotter.
+ nydusify.LayerAnnotationNydusBlobIDs: string(blobIDsBytes),
+ },
+ }
+
+ return &desc, nil
+}
diff --git a/cache/contenthash/checksum.go b/cache/contenthash/checksum.go
index a59523dd2956..dcf424a6b4fc 100644
--- a/cache/contenthash/checksum.go
+++ b/cache/contenthash/checksum.go
@@ -11,13 +11,13 @@ import (
"strings"
"sync"
- "github.com/docker/docker/pkg/fileutils"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/locker"
+ "github.com/moby/patternmatcher"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
@@ -79,8 +79,8 @@ type includedPath struct {
path string
record *CacheRecord
included bool
- includeMatchInfo fileutils.MatchInfo
- excludeMatchInfo fileutils.MatchInfo
+ includeMatchInfo patternmatcher.MatchInfo
+ excludeMatchInfo patternmatcher.MatchInfo
}
type cacheManager struct {
@@ -496,17 +496,17 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
endsInSep := len(p) != 0 && p[len(p)-1] == filepath.Separator
p = keyPath(p)
- var includePatternMatcher *fileutils.PatternMatcher
+ var includePatternMatcher *patternmatcher.PatternMatcher
if len(opts.IncludePatterns) != 0 {
- includePatternMatcher, err = fileutils.NewPatternMatcher(opts.IncludePatterns)
+ includePatternMatcher, err = patternmatcher.New(opts.IncludePatterns)
if err != nil {
return nil, errors.Wrapf(err, "invalid includepatterns: %s", opts.IncludePatterns)
}
}
- var excludePatternMatcher *fileutils.PatternMatcher
+ var excludePatternMatcher *patternmatcher.PatternMatcher
if len(opts.ExcludePatterns) != 0 {
- excludePatternMatcher, err = fileutils.NewPatternMatcher(opts.ExcludePatterns)
+ excludePatternMatcher, err = patternmatcher.New(opts.ExcludePatterns)
if err != nil {
return nil, errors.Wrapf(err, "invalid excludepatterns: %s", opts.ExcludePatterns)
}
@@ -695,21 +695,21 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o
func shouldIncludePath(
candidate string,
- includePatternMatcher *fileutils.PatternMatcher,
- excludePatternMatcher *fileutils.PatternMatcher,
+ includePatternMatcher *patternmatcher.PatternMatcher,
+ excludePatternMatcher *patternmatcher.PatternMatcher,
maybeIncludedPath *includedPath,
parentDir *includedPath,
) (bool, error) {
var (
m bool
- matchInfo fileutils.MatchInfo
+ matchInfo patternmatcher.MatchInfo
err error
)
if includePatternMatcher != nil {
if parentDir != nil {
m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, parentDir.includeMatchInfo)
} else {
- m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{})
+ m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, patternmatcher.MatchInfo{})
}
if err != nil {
return false, errors.Wrap(err, "failed to match includepatterns")
@@ -724,7 +724,7 @@ func shouldIncludePath(
if parentDir != nil {
m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, parentDir.excludeMatchInfo)
} else {
- m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{})
+ m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, patternmatcher.MatchInfo{})
}
if err != nil {
return false, errors.Wrap(err, "failed to match excludepatterns")
@@ -799,7 +799,7 @@ func splitWildcards(p string) (d1, d2 string) {
p2 = append(p2, p)
}
}
- return filepath.Join(p1...), filepath.Join(p2...)
+ return path.Join(p1...), path.Join(p2...)
}
func containsWildcards(name string) bool {
@@ -1015,7 +1015,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
Type: CacheRecordTypeSymlink,
Linkname: filepath.ToSlash(link),
}
- k := []byte(filepath.Join("/", filepath.ToSlash(p)))
+ k := []byte(path.Join("/", filepath.ToSlash(p)))
k = convertPathToKey(k)
txn.Insert(k, cr)
return nil
@@ -1024,15 +1024,15 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
return err
}
- err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error {
+ err = filepath.Walk(parentPath, func(itemPath string, fi os.FileInfo, err error) error {
if err != nil {
- return errors.Wrapf(err, "failed to walk %s", path)
+ return errors.Wrapf(err, "failed to walk %s", itemPath)
}
- rel, err := filepath.Rel(mp, path)
+ rel, err := filepath.Rel(mp, itemPath)
if err != nil {
return err
}
- k := []byte(filepath.Join("/", filepath.ToSlash(rel)))
+ k := []byte(path.Join("/", filepath.ToSlash(rel)))
if string(k) == "/" {
k = []byte{}
}
@@ -1043,7 +1043,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
}
if fi.Mode()&os.ModeSymlink != 0 {
cr.Type = CacheRecordTypeSymlink
- link, err := os.Readlink(path)
+ link, err := os.Readlink(itemPath)
if err != nil {
return err
}
diff --git a/cache/contenthash/checksum_test.go b/cache/contenthash/checksum_test.go
index 713c7a560913..cfdbfe2b465e 100644
--- a/cache/contenthash/checksum_test.go
+++ b/cache/contenthash/checksum_test.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -43,14 +42,12 @@ const (
func TestChecksumSymlinkNoParentScan(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD aa dir",
@@ -72,14 +69,12 @@ func TestChecksumSymlinkNoParentScan(t *testing.T) {
func TestChecksumHardlinks(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD abc dir",
@@ -155,14 +150,12 @@ func TestChecksumHardlinks(t *testing.T) {
func TestChecksumWildcardOrFilter(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD bar file data1",
@@ -212,14 +205,16 @@ func TestChecksumWildcardOrFilter(t *testing.T) {
func TestChecksumWildcardWithBadMountable(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
+
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ref := createRef(t, cm, nil)
@@ -232,14 +227,12 @@ func TestChecksumWildcardWithBadMountable(t *testing.T) {
func TestSymlinksNoFollow(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD target file data0",
@@ -291,14 +284,12 @@ func TestSymlinksNoFollow(t *testing.T) {
func TestChecksumBasicFile(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD foo file data0",
@@ -449,14 +440,12 @@ func TestChecksumIncludeExclude(t *testing.T) {
func testChecksumIncludeExclude(t *testing.T, wildcard bool) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD foo file data0",
@@ -584,14 +573,12 @@ func testChecksumIncludeExclude(t *testing.T, wildcard bool) {
func TestChecksumIncludeDoubleStar(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD prefix dir",
@@ -652,14 +639,12 @@ func TestChecksumIncludeDoubleStar(t *testing.T) {
func TestChecksumIncludeSymlink(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD data dir",
@@ -725,14 +710,16 @@ func TestChecksumIncludeSymlink(t *testing.T) {
func TestHandleChange(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
+
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD foo file data0",
@@ -803,14 +790,16 @@ func TestHandleChange(t *testing.T) {
func TestHandleRecursiveDir(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
+
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD d0 dir",
@@ -852,14 +841,16 @@ func TestHandleRecursiveDir(t *testing.T) {
func TestChecksumUnorderedFiles(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
+
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD d0 dir",
@@ -905,14 +896,12 @@ func TestChecksumUnorderedFiles(t *testing.T) {
func TestSymlinkInPathScan(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD d0 dir",
@@ -936,14 +925,12 @@ func TestSymlinkInPathScan(t *testing.T) {
func TestSymlinkNeedsScan(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD c0 dir",
@@ -969,14 +956,12 @@ func TestSymlinkNeedsScan(t *testing.T) {
func TestSymlinkAbsDirSuffix(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD c0 dir",
@@ -996,14 +981,12 @@ func TestSymlinkAbsDirSuffix(t *testing.T) {
func TestSymlinkThroughParent(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD lib dir",
@@ -1051,14 +1034,16 @@ func TestSymlinkThroughParent(t *testing.T) {
func TestSymlinkInPathHandleChange(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
+
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD d1 dir",
@@ -1114,14 +1099,12 @@ func TestSymlinkInPathHandleChange(t *testing.T) {
func TestPersistence(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-state")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- cm, closeBolt := setupCacheManager(t, tmpdir, "native", snapshotter)
- defer cm.Close()
+ cm, cleanup := setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ch := []string{
"ADD foo file data0",
@@ -1155,12 +1138,10 @@ func TestPersistence(t *testing.T) {
time.Sleep(100 * time.Millisecond) // saving happens on the background
// we can't close snapshotter and open it twice (especially, its internal bbolt store)
- cm.Close()
- closeBolt()
+ cleanup()
getDefaultManager().lru.Purge()
- cm, closeBolt = setupCacheManager(t, tmpdir, "native", snapshotter)
- defer closeBolt()
- defer cm.Close()
+ cm, cleanup = setupCacheManager(t, tmpdir, "native", snapshotter)
+ t.Cleanup(cleanup)
ref, err = cm.Get(context.TODO(), id, nil)
require.NoError(t, err)
@@ -1229,6 +1210,8 @@ func setupCacheManager(t *testing.T, tmpdir string, snapshotterName string, snap
return cm, func() {
db.Close()
+ md.Close()
+ cm.Close()
}
}
diff --git a/cache/contenthash/filehash.go b/cache/contenthash/filehash.go
index 0b5267101b03..246f8f7f1c80 100644
--- a/cache/contenthash/filehash.go
+++ b/cache/contenthash/filehash.go
@@ -51,6 +51,8 @@ func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) {
hdr.Name = "" // note: empty name is different from current has in docker build. Name is added on recursive directory scan instead
hdr.Devmajor = stat.Devmajor
hdr.Devminor = stat.Devminor
+ hdr.Uid = int(stat.Uid)
+ hdr.Gid = int(stat.Gid)
if len(stat.Xattrs) > 0 {
hdr.PAXRecords = make(map[string]string, len(stat.Xattrs))
diff --git a/cache/contenthash/tarsum.go b/cache/contenthash/tarsum.go
index 182c46118428..456e1ad7f12a 100644
--- a/cache/contenthash/tarsum.go
+++ b/cache/contenthash/tarsum.go
@@ -37,10 +37,10 @@ func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
pax := h.PAXRecords
- if len(h.Xattrs) > 0 { //nolint deprecated
+ if len(h.Xattrs) > 0 { //nolint:staticcheck // field deprecated in stdlib
if pax == nil {
pax = map[string]string{}
- for k, v := range h.Xattrs { //nolint deprecated
+ for k, v := range h.Xattrs { //nolint:staticcheck // field deprecated in stdlib
pax["SCHILY.xattr."+k] = v
}
}
diff --git a/cache/converter.go b/cache/converter.go
index a7e4df193aff..f19412b7086a 100644
--- a/cache/converter.go
+++ b/cache/converter.go
@@ -7,120 +7,46 @@ import (
"io"
"sync"
- cdcompression "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
- "github.com/containerd/containerd/images"
"github.com/containerd/containerd/images/converter"
"github.com/containerd/containerd/labels"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/compression"
+ "github.com/moby/buildkit/util/iohelper"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
-// needsConversion indicates whether a conversion is needed for the specified descriptor to
-// be the compressionType.
-func needsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (bool, error) {
- mediaType := desc.MediaType
- switch compressionType {
- case compression.Uncompressed:
- if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Uncompressed {
- return false, nil
- }
- case compression.Gzip:
- esgz, err := isEStargz(ctx, cs, desc.Digest)
- if err != nil {
- return false, err
- }
- if (!images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Gzip) && !esgz {
- return false, nil
- }
- case compression.Zstd:
- if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Zstd {
- return false, nil
- }
- case compression.EStargz:
- esgz, err := isEStargz(ctx, cs, desc.Digest)
- if err != nil {
- return false, err
- }
- if !images.IsLayerType(mediaType) || esgz {
- return false, nil
- }
- default:
- return false, fmt.Errorf("unknown compression type during conversion: %q", compressionType)
- }
- return true, nil
-}
-
// getConverter returns converter function according to the specified compression type.
// If no conversion is needed, this returns nil without error.
func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, comp compression.Config) (converter.ConvertFunc, error) {
- if needs, err := needsConversion(ctx, cs, desc, comp.Type); err != nil {
+ if needs, err := comp.Type.NeedsConversion(ctx, cs, desc); err != nil {
return nil, errors.Wrapf(err, "failed to determine conversion needs")
} else if !needs {
// No conversion. No need to return an error here.
return nil, nil
}
- c := conversion{target: comp}
-
- from := compression.FromMediaType(desc.MediaType)
- switch from {
- case compression.Uncompressed:
- case compression.Gzip, compression.Zstd:
- c.decompress = func(ctx context.Context, desc ocispecs.Descriptor) (r io.ReadCloser, err error) {
- ra, err := cs.ReaderAt(ctx, desc)
- if err != nil {
- return nil, err
- }
- esgz, err := isEStargz(ctx, cs, desc.Digest)
- if err != nil {
- return nil, err
- } else if esgz {
- r, err = decompressEStargz(io.NewSectionReader(ra, 0, ra.Size()))
- if err != nil {
- return nil, err
- }
- } else {
- r, err = cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size()))
- if err != nil {
- return nil, err
- }
- }
- return &readCloser{r, ra.Close}, nil
- }
- default:
- return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType)
+ from, err := compression.FromMediaType(desc.MediaType)
+ if err != nil {
+ return nil, err
}
- switch comp.Type {
- case compression.Uncompressed:
- case compression.Gzip:
- c.compress = gzipWriter(comp)
- case compression.Zstd:
- c.compress = zstdWriter(comp)
- case compression.EStargz:
- compressorFunc, finalize := compressEStargz(comp)
- c.compress = func(w io.Writer) (io.WriteCloser, error) {
- return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip)
- }
- c.finalize = finalize
- default:
- return nil, errors.Errorf("unknown target compression type during conversion: %q", comp.Type)
- }
+ c := conversion{target: comp}
+ c.compress, c.finalize = comp.Type.Compress(ctx, comp)
+ c.decompress = from.Decompress
return (&c).convert, nil
}
type conversion struct {
target compression.Config
- decompress func(context.Context, ocispecs.Descriptor) (io.ReadCloser, error)
- compress func(w io.Writer) (io.WriteCloser, error)
- finalize func(context.Context, content.Store) (map[string]string, error)
+ decompress compression.Decompressor
+ compress compression.Compressor
+ finalize compression.Finalizer
}
var bufioPool = sync.Pool{
@@ -151,34 +77,20 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec
bufW = bufio.NewWriterSize(w, 128*1024)
}
defer bufioPool.Put(bufW)
- var zw io.WriteCloser = &nopWriteCloser{bufW}
- if c.compress != nil {
- zw, err = c.compress(zw)
- if err != nil {
- return nil, err
- }
+ zw, err := c.compress(&iohelper.NopWriteCloser{Writer: bufW}, c.target.Type.MediaType())
+ if err != nil {
+ return nil, err
}
zw = &onceWriteCloser{WriteCloser: zw}
defer zw.Close()
// convert this layer
diffID := digest.Canonical.Digester()
- var rdr io.Reader
- if c.decompress == nil {
- ra, err := cs.ReaderAt(ctx, desc)
- if err != nil {
- return nil, err
- }
- defer ra.Close()
- rdr = io.NewSectionReader(ra, 0, ra.Size())
- } else {
- rc, err := c.decompress(ctx, desc)
- if err != nil {
- return nil, err
- }
- defer rc.Close()
- rdr = rc
+ rdr, err := c.decompress(ctx, cs, desc)
+ if err != nil {
+ return nil, err
}
+ defer rdr.Close()
if _, err := io.Copy(zw, io.TeeReader(rdr, diffID.Hash())); err != nil {
return nil, err
}
@@ -201,7 +113,7 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec
}
newDesc := desc
- newDesc.MediaType = c.target.Type.DefaultMediaType()
+ newDesc.MediaType = c.target.Type.MediaType()
newDesc.Digest = info.Digest
newDesc.Size = info.Size
newDesc.Annotations = map[string]string{labels.LabelUncompressed: diffID.Digest().String()}
@@ -217,28 +129,6 @@ func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispec
return &newDesc, nil
}
-type readCloser struct {
- io.ReadCloser
- closeFunc func() error
-}
-
-func (rc *readCloser) Close() error {
- err1 := rc.ReadCloser.Close()
- err2 := rc.closeFunc()
- if err1 != nil {
- return errors.Wrapf(err1, "failed to close: %v", err2)
- }
- return err2
-}
-
-type nopWriteCloser struct {
- io.Writer
-}
-
-func (w *nopWriteCloser) Close() error {
- return nil
-}
-
type onceWriteCloser struct {
io.WriteCloser
closeOnce sync.Once
diff --git a/cache/filelist.go b/cache/filelist.go
new file mode 100644
index 000000000000..c2c7921fd5db
--- /dev/null
+++ b/cache/filelist.go
@@ -0,0 +1,90 @@
+package cache
+
+import (
+ "archive/tar"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "path"
+ "sort"
+
+ cdcompression "github.com/containerd/containerd/archive/compression"
+ "github.com/moby/buildkit/session"
+)
+
+const keyFileList = "filelist"
+
+// FileList returns an ordered list of files present in the cache record that were
+// changed compared to the parent. The paths of the files are in the same format as
+// they appear in the tar stream (AUFS whiteout format). If the reference does not
+// have a blob associated with it, the list is empty.
+func (sr *immutableRef) FileList(ctx context.Context, s session.Group) ([]string, error) {
+ res, err := g.Do(ctx, fmt.Sprintf("filelist-%s", sr.ID()), func(ctx context.Context) (interface{}, error) {
+ dt, err := sr.GetExternal(keyFileList)
+ if err == nil && dt != nil {
+ var files []string
+ if err := json.Unmarshal(dt, &files); err != nil {
+ return nil, err
+ }
+ return files, nil
+ }
+
+ if sr.getBlob() == "" {
+ return nil, nil
+ }
+
+ // lazy blobs need to be pulled first
+ if err := sr.Extract(ctx, s); err != nil {
+ return nil, err
+ }
+
+ desc, err := sr.ociDesc(ctx, sr.descHandlers, false)
+ if err != nil {
+ return nil, err
+ }
+
+ ra, err := sr.cm.ContentStore.ReaderAt(ctx, desc)
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size()))
+ if err != nil {
+ return nil, err
+ }
+ defer r.Close()
+
+ var files []string
+
+ rdr := tar.NewReader(r)
+ for {
+ hdr, err := rdr.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ name := path.Clean(hdr.Name)
+ files = append(files, name)
+ }
+ sort.Strings(files)
+
+ dt, err = json.Marshal(files)
+ if err != nil {
+ return nil, err
+ }
+ if err := sr.SetExternal(keyFileList, dt); err != nil {
+ return nil, err
+ }
+ return files, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ if res == nil {
+ return nil, nil
+ }
+ return res.([]string), nil
+}
diff --git a/cache/manager.go b/cache/manager.go
index 8f91d3c7a0a6..d579a6007ba0 100644
--- a/cache/manager.go
+++ b/cache/manager.go
@@ -222,10 +222,8 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
id := identity.NewID()
snapshotID := chainID.String()
- blobOnly := true
if link != nil {
snapshotID = link.getSnapshotID()
- blobOnly = link.getBlobOnly()
go link.Release(context.TODO())
}
@@ -289,7 +287,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
rec.queueChainID(chainID)
rec.queueBlobChainID(blobChainID)
rec.queueSnapshotID(snapshotID)
- rec.queueBlobOnly(blobOnly)
+ rec.queueBlobOnly(true)
rec.queueMediaType(desc.MediaType)
rec.queueBlobSize(desc.Size)
rec.appendURLs(desc.URLs)
@@ -301,7 +299,14 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
cm.records[id] = rec
- return rec.ref(true, descHandlers, nil), nil
+ ref := rec.ref(true, descHandlers, nil)
+ if s := unlazySessionOf(opts...); s != nil {
+ if err := ref.unlazy(ctx, ref.descHandlers, ref.progress, s, true); err != nil {
+ return nil, err
+ }
+ }
+
+ return ref, nil
}
// init loads all snapshots from metadata state and tries to load the records
@@ -458,6 +463,13 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt
cacheMetadata: md,
}
+ // TODO:(sipsma) this is a kludge to deal with a bug in v0.10.{0,1} where
+ // merge and diff refs didn't have committed set to true:
+ // https://github.com/moby/buildkit/issues/2740
+ if kind := rec.kind(); kind == Merge || kind == Diff {
+ rec.mutable = false
+ }
+
// the record was deleted but we crashed before data on disk was removed
if md.getDeleted() {
if err := rec.remove(ctx, true); err != nil {
@@ -496,6 +508,11 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt
}
func (cm *cacheManager) parentsOf(ctx context.Context, md *cacheMetadata, opts ...RefOption) (ps parentRefs, rerr error) {
+ defer func() {
+ if rerr != nil {
+ ps.release(context.TODO())
+ }
+ }()
if parentID := md.getParent(); parentID != "" {
p, err := cm.get(ctx, parentID, nil, append(opts, NoUpdateLastUsed))
if err != nil {
@@ -794,7 +811,7 @@ func (cm *cacheManager) createMergeRef(ctx context.Context, parents parentRefs,
}
rec.queueSnapshotID(snapshotID)
-
+ rec.queueCommitted(true)
if err := rec.commitMetadata(); err != nil {
return nil, err
}
@@ -969,6 +986,7 @@ func (cm *cacheManager) createDiffRef(ctx context.Context, parents parentRefs, d
}
rec.queueSnapshotID(snapshotID)
+ rec.queueCommitted(true)
if err := rec.commitMetadata(); err != nil {
return nil, err
}
diff --git a/cache/manager_test.go b/cache/manager_test.go
index 78387a023b10..cd58a4042b29 100644
--- a/cache/manager_test.go
+++ b/cache/manager_test.go
@@ -8,7 +8,6 @@ import (
"encoding/binary"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -43,6 +42,7 @@ import (
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
+ "github.com/moby/buildkit/util/iohelper"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/util/winlayers"
digest "github.com/opencontainers/go-digest"
@@ -65,7 +65,7 @@ type cmOut struct {
cs content.Store
}
-func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() error, err error) {
+func newCacheManager(ctx context.Context, t *testing.T, opt cmOpt) (co *cmOut, cleanup func(), err error) {
ns, ok := namespaces.Namespace(ctx)
if !ok {
return nil, nil, errors.Errorf("namespace required for test")
@@ -75,31 +75,24 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func()
opt.snapshotterName = "native"
}
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- if err != nil {
- return nil, nil, err
- }
+ tmpdir := t.TempDir()
defers := make([]func() error, 0)
- cleanup = func() error {
+ cleanup = func() {
var err error
for i := range defers {
if err1 := defers[len(defers)-1-i](); err1 != nil && err == nil {
err = err1
}
}
- return err
+ require.NoError(t, err)
}
defer func() {
if err != nil && cleanup != nil {
cleanup()
}
}()
- if opt.tmpdir == "" {
- defers = append(defers, func() error {
- return os.RemoveAll(tmpdir)
- })
- } else {
+ if opt.tmpdir != "" {
os.RemoveAll(tmpdir)
tmpdir = opt.tmpdir
}
@@ -142,6 +135,9 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func()
if err != nil {
return nil, nil, err
}
+ defers = append(defers, func() error {
+ return md.Close()
+ })
cm, err := NewManager(ManagerOpt{
Snapshotter: snapshot.FromContainerdSnapshotter(opt.snapshotterName, containerdsnapshot.NSSnapshotter(ns, mdb.Snapshotter(opt.snapshotterName)), nil),
@@ -156,6 +152,10 @@ func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func()
if err != nil {
return nil, nil, err
}
+ defers = append(defers, func() error {
+ return cm.Close()
+ })
+
return &cmOut{
manager: cm,
lm: lm,
@@ -167,22 +167,20 @@ func TestSharableMountPoolCleanup(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
// Emulate the situation where the pool dir is dirty
mountPoolDir := filepath.Join(tmpdir, "cachemounts")
require.NoError(t, os.MkdirAll(mountPoolDir, 0700))
- _, err = ioutil.TempDir(mountPoolDir, "buildkit")
+ _, err := os.MkdirTemp(mountPoolDir, "buildkit")
require.NoError(t, err)
// Initialize cache manager and check if pool is cleaned up
- _, cleanup, err := newCacheManager(ctx, cmOpt{
+ _, cleanup, err := newCacheManager(ctx, t, cmOpt{
tmpdir: tmpdir,
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
files, err := os.ReadDir(mountPoolDir)
require.NoError(t, err)
@@ -194,20 +192,21 @@ func TestManager(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
+ t.Cleanup(cleanup)
- defer cleanup()
cm := co.manager
_, err = cm.Get(ctx, "foobar", nil)
@@ -317,7 +316,7 @@ func TestManager(t *testing.T) {
err = cm.Close()
require.NoError(t, err)
- dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err := os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
}
@@ -326,19 +325,17 @@ func TestLazyGetByBlob(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
// Test for #2226 https://github.com/moby/buildkit/issues/2226, create lazy blobs with the same diff ID but
@@ -371,19 +368,17 @@ func TestMergeBlobchainID(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
// create a merge ref that has 3 inputs, with each input being a 3 layer blob chain
@@ -444,20 +439,17 @@ func TestSnapshotExtract(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
-
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
@@ -487,7 +479,7 @@ func TestSnapshotExtract(t *testing.T) {
require.Equal(t, false, !snap2.(*immutableRef).getBlobOnly())
- dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err := os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
@@ -499,7 +491,7 @@ func TestSnapshotExtract(t *testing.T) {
require.Equal(t, true, !snap.(*immutableRef).getBlobOnly())
require.Equal(t, true, !snap2.(*immutableRef).getBlobOnly())
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
@@ -512,7 +504,7 @@ func TestSnapshotExtract(t *testing.T) {
require.Equal(t, len(buf.all), 0)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
@@ -530,7 +522,7 @@ func TestSnapshotExtract(t *testing.T) {
checkDiskUsage(ctx, t, cm, 2, 0)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
@@ -553,7 +545,7 @@ func TestSnapshotExtract(t *testing.T) {
require.Equal(t, len(buf.all), 1)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 1, len(dirs))
@@ -569,7 +561,7 @@ func TestSnapshotExtract(t *testing.T) {
checkDiskUsage(ctx, t, cm, 0, 0)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
@@ -584,20 +576,17 @@ func TestExtractOnMutable(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
-
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
@@ -643,7 +632,7 @@ func TestExtractOnMutable(t *testing.T) {
require.NoError(t, err)
require.Equal(t, int64(len(b2)), size)
- dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err := os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 1, len(dirs))
@@ -664,7 +653,7 @@ func TestExtractOnMutable(t *testing.T) {
require.Equal(t, len(buf.all), 0)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
@@ -682,7 +671,7 @@ func TestExtractOnMutable(t *testing.T) {
require.Equal(t, len(buf.all), 2)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
@@ -693,20 +682,20 @@ func TestSetBlob(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
-
- defer cleanup()
+ t.Cleanup(cleanup)
ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary)
require.NoError(t, err)
@@ -866,20 +855,21 @@ func TestPrune(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
+ t.Cleanup(cleanup)
- defer cleanup()
cm := co.manager
active, err := cm.New(ctx, nil, nil)
@@ -896,7 +886,7 @@ func TestPrune(t *testing.T) {
checkDiskUsage(ctx, t, cm, 2, 0)
- dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err := os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
@@ -909,7 +899,7 @@ func TestPrune(t *testing.T) {
checkDiskUsage(ctx, t, cm, 2, 0)
require.Equal(t, len(buf.all), 0)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
@@ -927,7 +917,7 @@ func TestPrune(t *testing.T) {
checkDiskUsage(ctx, t, cm, 1, 0)
require.Equal(t, len(buf.all), 1)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 1, len(dirs))
@@ -967,7 +957,7 @@ func TestPrune(t *testing.T) {
checkDiskUsage(ctx, t, cm, 0, 0)
require.Equal(t, len(buf.all), 2)
- dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
+ dirs, err = os.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
}
@@ -977,14 +967,15 @@ func TestLazyCommit(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
tmpdir: tmpdir,
snapshotter: snapshotter,
snapshotterName: "native",
@@ -1074,7 +1065,7 @@ func TestLazyCommit(t *testing.T) {
cleanup()
// we can't close snapshotter and open it twice (especially, its internal bbolt store)
- co, cleanup, err = newCacheManager(ctx, cmOpt{
+ co, cleanup, err = newCacheManager(ctx, t, cmOpt{
tmpdir: tmpdir,
snapshotter: snapshotter,
snapshotterName: "native",
@@ -1103,13 +1094,13 @@ func TestLazyCommit(t *testing.T) {
cleanup()
- co, cleanup, err = newCacheManager(ctx, cmOpt{
+ co, cleanup, err = newCacheManager(ctx, t, cmOpt{
tmpdir: tmpdir,
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm = co.manager
snap2, err = cm.Get(ctx, snap.ID(), nil)
@@ -1135,19 +1126,17 @@ func TestLoopLeaseContent(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary)
@@ -1252,19 +1241,17 @@ func TestSharingCompressionVariant(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
allCompressions := []compression.Type{compression.Uncompressed, compression.Gzip, compression.Zstd, compression.EStargz}
@@ -1359,7 +1346,7 @@ func testSharingCompressionVariant(ctx context.Context, t *testing.T, co *cmOut,
if err != nil {
return nil, "", err
}
- return cw, testCase.a.DefaultMediaType(), nil
+ return cw, testCase.a.MediaType(), nil
})
require.NoError(t, err)
contentBuffer := contentutil.NewBuffer()
@@ -1400,9 +1387,9 @@ func testSharingCompressionVariant(ctx context.Context, t *testing.T, co *cmOut,
// check if all compression variables are available on the both refs
checkCompression := func(desc ocispecs.Descriptor, compressionType compression.Type) {
- require.Equal(t, compressionType.DefaultMediaType(), desc.MediaType, "compression: %v", compressionType)
+ require.Equal(t, compressionType.MediaType(), desc.MediaType, "compression: %v", compressionType)
if compressionType == compression.EStargz {
- ok, err := isEStargz(ctx, co.cs, desc.Digest)
+ ok, err := compression.EStargz.Is(ctx, co.cs, desc.Digest)
require.NoError(t, err, "compression: %v", compressionType)
require.True(t, ok, "compression: %v", compressionType)
}
@@ -1467,7 +1454,7 @@ func ensurePrune(ctx context.Context, t *testing.T, cm Manager, pruneNum, maxRet
func getCompressor(w io.Writer, compressionType compression.Type, customized bool) (io.WriteCloser, error) {
switch compressionType {
case compression.Uncompressed:
- return nil, fmt.Errorf("compression is not requested: %v", compressionType)
+ return nil, errors.Errorf("compression is not requested: %v", compressionType)
case compression.Gzip:
if customized {
gz, _ := gzip.NewWriterLevel(w, gzip.NoCompression)
@@ -1495,7 +1482,7 @@ func getCompressor(w io.Writer, compressionType compression.Type, customized boo
}
pr.Close()
}()
- return &writeCloser{pw, func() error { <-done; return nil }}, nil
+ return &iohelper.WriteCloser{WriteCloser: pw, CloseFunc: func() error { <-done; return nil }}, nil
case compression.Zstd:
if customized {
skippableFrameMagic := []byte{0x50, 0x2a, 0x4d, 0x18}
@@ -1508,7 +1495,7 @@ func getCompressor(w io.Writer, compressionType compression.Type, customized boo
}
return zstd.NewWriter(w)
default:
- return nil, fmt.Errorf("unknown compression type: %q", compressionType)
+ return nil, errors.Errorf("unknown compression type: %q", compressionType)
}
}
@@ -1520,19 +1507,17 @@ func TestConversion(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
store := co.cs
// Preapre the original tar blob using archive/tar and tar command on the system
@@ -1547,7 +1532,7 @@ func TestConversion(t *testing.T) {
err = cw.Commit(ctx, 0, cw.Digest())
require.NoError(t, err)
- orgBlobBytesSys, orgDescSys, err := mapToSystemTarBlob(m)
+ orgBlobBytesSys, orgDescSys, err := mapToSystemTarBlob(t, m)
require.NoError(t, err)
cw, err = store.Writer(ctx, content.WithRef(fmt.Sprintf("write-test-blob-%s", orgDescSys.Digest)))
require.NoError(t, err)
@@ -1617,19 +1602,17 @@ func TestGetRemotes(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary)
@@ -1785,7 +1768,7 @@ func TestGetRemotes(t *testing.T) {
r := refChain[i]
isLazy, err := r.isLazy(egctx)
require.NoError(t, err)
- needs, err := needsConversion(ctx, co.cs, desc, compressionType)
+ needs, err := compressionType.NeedsConversion(ctx, co.cs, desc)
require.NoError(t, err)
if needs {
require.False(t, isLazy, "layer %q requires conversion so it must be unlazied", desc.Digest)
@@ -1917,19 +1900,17 @@ func TestNondistributableBlobs(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
@@ -2018,7 +1999,7 @@ func checkDescriptor(ctx context.Context, t *testing.T, cs content.Store, desc o
}
// Check annotation values are valid
- c := new(counter)
+ c := new(iohelper.Counter)
ra, err := cs.ReaderAt(ctx, desc)
if err != nil && errdefs.IsNotFound(err) {
return // lazy layer
@@ -2033,7 +2014,7 @@ func checkDescriptor(ctx context.Context, t *testing.T, cs content.Store, desc o
require.NoError(t, err)
require.Equal(t, diffID.Digest().String(), uncompressedDgst)
if compressionType == compression.EStargz {
- require.Equal(t, c.size(), uncompressedSize)
+ require.Equal(t, c.Size(), uncompressedSize)
}
}
@@ -2048,19 +2029,17 @@ func TestMergeOp(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
emptyMerge, err := cm.Merge(ctx, nil, nil)
@@ -2092,6 +2071,7 @@ func TestMergeOp(t *testing.T) {
singleMerge, err := cm.Merge(ctx, baseRefs[:1], nil)
require.NoError(t, err)
+ require.True(t, singleMerge.(*immutableRef).getCommitted())
m, err := singleMerge.Mount(ctx, true, nil)
require.NoError(t, err)
ms, unmount, err := m.Mount()
@@ -2112,6 +2092,7 @@ func TestMergeOp(t *testing.T) {
merge1, err := cm.Merge(ctx, baseRefs[:3], nil)
require.NoError(t, err)
+ require.True(t, merge1.(*immutableRef).getCommitted())
_, err = merge1.Mount(ctx, true, nil)
require.NoError(t, err)
size1, err := merge1.(*immutableRef).size(ctx)
@@ -2121,6 +2102,7 @@ func TestMergeOp(t *testing.T) {
merge2, err := cm.Merge(ctx, baseRefs[3:], nil)
require.NoError(t, err)
+ require.True(t, merge2.(*immutableRef).getCommitted())
_, err = merge2.Mount(ctx, true, nil)
require.NoError(t, err)
size2, err := merge2.(*immutableRef).size(ctx)
@@ -2136,6 +2118,7 @@ func TestMergeOp(t *testing.T) {
merge3, err := cm.Merge(ctx, []ImmutableRef{merge1, merge2}, nil)
require.NoError(t, err)
+ require.True(t, merge3.(*immutableRef).getCommitted())
require.NoError(t, merge1.Release(ctx))
require.NoError(t, merge2.Release(ctx))
_, err = merge3.Mount(ctx, true, nil)
@@ -2164,19 +2147,17 @@ func TestDiffOp(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
newLower, err := cm.New(ctx, nil, nil)
@@ -2243,6 +2224,20 @@ func TestDiffOp(t *testing.T) {
checkDiskUsage(ctx, t, cm, 0, 8)
require.NoError(t, cm.Prune(ctx, nil, client.PruneInfo{All: true}))
checkDiskUsage(ctx, t, cm, 0, 0)
+
+ // Test using nil as upper
+ newLower, err = cm.New(ctx, nil, nil)
+ require.NoError(t, err)
+ lowerB, err := newLower.Commit(ctx)
+ require.NoError(t, err)
+ diff, err = cm.Diff(ctx, lowerB, nil, nil)
+ require.NoError(t, err)
+ checkDiskUsage(ctx, t, cm, 2, 0)
+ require.NoError(t, lowerB.Release(ctx))
+ require.NoError(t, diff.Release(ctx))
+ checkDiskUsage(ctx, t, cm, 0, 2)
+ require.NoError(t, cm.Prune(ctx, nil, client.PruneInfo{All: true}))
+ checkDiskUsage(ctx, t, cm, 0, 0)
}
func TestLoadHalfFinalizedRef(t *testing.T) {
@@ -2254,20 +2249,21 @@ func TestLoadHalfFinalizedRef(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
tmpdir: tmpdir,
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager.(*cacheManager)
mref, err := cm.New(ctx, nil, nil, CachePolicyRetain)
@@ -2303,15 +2299,15 @@ func TestLoadHalfFinalizedRef(t *testing.T) {
require.NoError(t, iref.Release(ctx))
require.NoError(t, cm.Close())
- require.NoError(t, cleanup())
+ cleanup()
- co, cleanup, err = newCacheManager(ctx, cmOpt{
+ co, cleanup, err = newCacheManager(ctx, t, cmOpt{
tmpdir: tmpdir,
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm = co.manager.(*cacheManager)
_, err = cm.GetMutable(ctx, mutRef.ID())
@@ -2334,19 +2330,17 @@ func TestMountReadOnly(t *testing.T) {
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
- tmpdir, err := ioutil.TempDir("", "cachemanager")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
- co, cleanup, err := newCacheManager(ctx, cmOpt{
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
snapshotter: snapshotter,
snapshotterName: "overlay",
})
require.NoError(t, err)
- defer cleanup()
+ t.Cleanup(cleanup)
cm := co.manager
mutRef, err := cm.New(ctx, nil, nil)
@@ -2395,6 +2389,65 @@ func TestMountReadOnly(t *testing.T) {
}
}
+func TestLoadBrokenParents(t *testing.T) {
+	// Test that a ref whose parent can't be loaded does not leak
+	// any of its other parent refs
+ t.Parallel()
+
+ ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
+
+ tmpdir := t.TempDir()
+
+ snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ require.NoError(t, snapshotter.Close())
+ })
+
+ co, cleanup, err := newCacheManager(ctx, t, cmOpt{
+ tmpdir: tmpdir,
+ snapshotter: snapshotter,
+ snapshotterName: "native",
+ })
+ require.NoError(t, err)
+ t.Cleanup(cleanup)
+ cm := co.manager.(*cacheManager)
+
+ mutRef, err := cm.New(ctx, nil, nil)
+ require.NoError(t, err)
+ refA, err := mutRef.Commit(ctx)
+ require.NoError(t, err)
+ refAID := refA.ID()
+ mutRef, err = cm.New(ctx, nil, nil)
+ require.NoError(t, err)
+ refB, err := mutRef.Commit(ctx)
+ require.NoError(t, err)
+
+ _, err = cm.Merge(ctx, []ImmutableRef{refA, refB}, nil)
+ require.NoError(t, err)
+ checkDiskUsage(ctx, t, cm, 3, 0)
+
+ // set refB as deleted
+ require.NoError(t, refB.(*immutableRef).queueDeleted())
+ require.NoError(t, refB.(*immutableRef).commitMetadata())
+ require.NoError(t, cm.Close())
+ cleanup()
+
+ co, cleanup, err = newCacheManager(ctx, t, cmOpt{
+ tmpdir: tmpdir,
+ snapshotter: snapshotter,
+ snapshotterName: "native",
+ })
+ require.NoError(t, err)
+ t.Cleanup(cleanup)
+ cm = co.manager.(*cacheManager)
+
+ checkDiskUsage(ctx, t, cm, 0, 1)
+ refA, err = cm.Get(ctx, refAID, nil)
+ require.NoError(t, err)
+ require.Len(t, refA.(*immutableRef).refs, 1)
+}
+
func checkDiskUsage(ctx context.Context, t *testing.T, cm Manager, inuse, unused int) {
du, err := cm.DiskUsage(ctx, client.DiskUsageInfo{})
require.NoError(t, err)
@@ -2581,17 +2634,13 @@ func fileToBlob(file *os.File, compress bool) ([]byte, ocispecs.Descriptor, erro
}, nil
}
-func mapToSystemTarBlob(m map[string]string) ([]byte, ocispecs.Descriptor, error) {
- tmpdir, err := ioutil.TempDir("", "tarcreation")
- if err != nil {
- return nil, ocispecs.Descriptor{}, err
- }
- defer os.RemoveAll(tmpdir)
+func mapToSystemTarBlob(t *testing.T, m map[string]string) ([]byte, ocispecs.Descriptor, error) {
+ tmpdir := t.TempDir()
expected := map[string]string{}
for k, v := range m {
expected[k] = v
- if err := ioutil.WriteFile(filepath.Join(tmpdir, k), []byte(v), 0600); err != nil {
+ if err := os.WriteFile(filepath.Join(tmpdir, k), []byte(v), 0600); err != nil {
return nil, ocispecs.Descriptor{}, err
}
}
@@ -2620,7 +2669,7 @@ func mapToSystemTarBlob(m map[string]string) ([]byte, ocispecs.Descriptor, error
return nil, ocispecs.Descriptor{}, errors.Errorf("unexpected file %s", h.Name)
}
delete(expected, k)
- gotV, err := ioutil.ReadAll(tr)
+ gotV, err := io.ReadAll(tr)
if err != nil {
return nil, ocispecs.Descriptor{}, err
}
diff --git a/cache/metadata.go b/cache/metadata.go
index 121110bd13b0..82209a93c0f9 100644
--- a/cache/metadata.go
+++ b/cache/metadata.go
@@ -251,7 +251,13 @@ func (md *cacheMetadata) queueMediaType(str string) error {
}
func (md *cacheMetadata) getSnapshotID() string {
- return md.GetString(keySnapshot)
+ sid := md.GetString(keySnapshot)
+ // Note that historic buildkit releases did not always set the snapshot ID.
+ // Fallback to record ID is needed for old build cache compatibility.
+ if sid == "" {
+ return md.ID()
+ }
+ return sid
}
func (md *cacheMetadata) queueSnapshotID(str string) error {
@@ -551,9 +557,7 @@ func (md *cacheMetadata) appendStringSlice(key string, values ...string) error {
}
for _, existing := range slice {
- if _, ok := idx[existing]; ok {
- delete(idx, existing)
- }
+ delete(idx, existing)
}
if len(idx) == 0 {
diff --git a/cache/metadata/metadata.go b/cache/metadata/metadata.go
index ae957c3e72b7..170c0a8872f4 100644
--- a/cache/metadata/metadata.go
+++ b/cache/metadata/metadata.go
@@ -317,6 +317,9 @@ func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) {
func (s *StorageItem) Commit() error {
s.qmu.Lock()
defer s.qmu.Unlock()
+ if len(s.queue) == 0 {
+ return nil
+ }
return errors.WithStack(s.Update(func(b *bolt.Bucket) error {
for _, fn := range s.queue {
if err := fn(b); err != nil {
diff --git a/cache/metadata/metadata_test.go b/cache/metadata/metadata_test.go
index 7e3d5b055df6..0dc362773dda 100644
--- a/cache/metadata/metadata_test.go
+++ b/cache/metadata/metadata_test.go
@@ -1,8 +1,6 @@
package metadata
import (
- "io/ioutil"
- "os"
"path/filepath"
"testing"
@@ -13,9 +11,7 @@ import (
func TestGetSetSearch(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-storage")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
dbPath := filepath.Join(tmpdir, "storage.db")
@@ -112,9 +108,7 @@ func TestGetSetSearch(t *testing.T) {
func TestIndexes(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-storage")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
dbPath := filepath.Join(tmpdir, "storage.db")
@@ -172,9 +166,7 @@ func TestIndexes(t *testing.T) {
func TestExternalData(t *testing.T) {
t.Parallel()
- tmpdir, err := ioutil.TempDir("", "buildkit-storage")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
dbPath := filepath.Join(tmpdir, "storage.db")
diff --git a/cache/opts.go b/cache/opts.go
index 92df9989d928..1f1db6ca6105 100644
--- a/cache/opts.go
+++ b/cache/opts.go
@@ -36,4 +36,13 @@ func (m NeedsRemoteProviderError) Error() string {
return fmt.Sprintf("missing descriptor handlers for lazy blobs %+v", []digest.Digest(m))
}
-type ProgressKey struct{}
+type Unlazy session.Group
+
+func unlazySessionOf(opts ...RefOption) session.Group {
+ for _, opt := range opts {
+ if opt, ok := opt.(session.Group); ok {
+ return opt
+ }
+ }
+ return nil
+}
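
A minimal usage sketch for the new `Unlazy` option (hypothetical caller code, not part of this patch): the session group rides along in the untyped `RefOption` list and `unlazySessionOf` recovers it inside the cache package so lazy, not-yet-pulled blobs can be fetched on demand. The helper name `getWithSession` and the use of `cache.Accessor` are assumptions for illustration.

```go
package example // sketch only

import (
	"context"

	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/session"
)

// getWithSession shows the intended call pattern: Unlazy is just a named
// session.Group, so converting the group is enough; on the other side
// unlazySessionOf(opts...) returns it again.
func getWithSession(ctx context.Context, cm cache.Accessor, id string, g session.Group) (cache.ImmutableRef, error) {
	return cm.Get(ctx, id, nil, cache.Unlazy(g))
}
```
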
diff --git a/cache/refs.go b/cache/refs.go
index c937dd1bfa98..dc2cd561b01d 100644
--- a/cache/refs.go
+++ b/cache/refs.go
@@ -3,7 +3,6 @@ package cache
import (
"context"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -37,6 +36,8 @@ import (
"golang.org/x/sync/errgroup"
)
+var additionalAnnotations = append(compression.EStargzAnnotations, containerdUncompressed)
+
// Ref is a reference to cacheable objects.
type Ref interface {
Mountable
@@ -56,6 +57,7 @@ type ImmutableRef interface {
Extract(ctx context.Context, s session.Group) error // +progress
GetRemotes(ctx context.Context, createIfNeeded bool, cfg config.RefConfig, all bool, s session.Group) ([]*solver.Remote, error)
LayerChain() RefList
+ FileList(ctx context.Context, s session.Group) ([]string, error)
}
type MutableRef interface {
@@ -533,7 +535,7 @@ func (cr *cacheRecord) layerDigestChain() []digest.Digest {
}
switch cr.kind() {
case Diff:
- if cr.getBlob() == "" {
+ if cr.getBlob() == "" && cr.diffParents.upper != nil {
// this diff just reuses the upper blob
cr.layerDigestChainCache = cr.diffParents.upper.layerDigestChain()
} else {
@@ -768,12 +770,9 @@ func (sr *immutableRef) getBlobWithCompression(ctx context.Context, compressionT
}
func getBlobWithCompression(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (ocispecs.Descriptor, error) {
- if compressionType == compression.UnknownCompression {
- return ocispecs.Descriptor{}, fmt.Errorf("cannot get unknown compression type")
- }
var target *ocispecs.Descriptor
if err := walkBlob(ctx, cs, desc, func(desc ocispecs.Descriptor) bool {
- if needs, err := needsConversion(ctx, cs, desc, compressionType); err == nil && !needs {
+ if needs, err := compressionType.NeedsConversion(ctx, cs, desc); err == nil && !needs {
target = &desc
return false
}
@@ -838,11 +837,11 @@ func getBlobDesc(ctx context.Context, cs content.Store, dgst digest.Digest) (oci
return ocispecs.Descriptor{}, err
}
if info.Labels == nil {
- return ocispecs.Descriptor{}, fmt.Errorf("no blob metadata is stored for %q", info.Digest)
+ return ocispecs.Descriptor{}, errors.Errorf("no blob metadata is stored for %q", info.Digest)
}
mt, ok := info.Labels[blobMediaTypeLabel]
if !ok {
- return ocispecs.Descriptor{}, fmt.Errorf("no media type is stored for %q", info.Digest)
+ return ocispecs.Descriptor{}, errors.Errorf("no media type is stored for %q", info.Digest)
}
desc := ocispecs.Descriptor{
Digest: info.Digest,
@@ -882,7 +881,7 @@ func filterAnnotationsForSave(a map[string]string) (b map[string]string) {
if a == nil {
return nil
}
- for _, k := range append(eStargzAnnotations, containerdUncompressed) {
+ for _, k := range additionalAnnotations {
v, ok := a[k]
if !ok {
continue
@@ -1552,12 +1551,12 @@ func readonlyOverlay(opt []string) []string {
func newSharableMountPool(tmpdirRoot string) (sharableMountPool, error) {
if tmpdirRoot != "" {
if err := os.MkdirAll(tmpdirRoot, 0700); err != nil {
- return sharableMountPool{}, fmt.Errorf("failed to prepare mount pool: %w", err)
+ return sharableMountPool{}, errors.Wrap(err, "failed to prepare mount pool")
}
// If tmpdirRoot is specified, remove existing mounts to avoid conflict.
files, err := os.ReadDir(tmpdirRoot)
if err != nil {
- return sharableMountPool{}, fmt.Errorf("failed to read mount pool: %w", err)
+ return sharableMountPool{}, errors.Wrap(err, "failed to read mount pool")
}
for _, file := range files {
if file.IsDir() {
@@ -1591,9 +1590,10 @@ func (p sharableMountPool) setSharable(mounts snapshot.Mountable) snapshot.Mount
// This is useful to share writable overlayfs mounts.
//
// NOTE: Mount() method doesn't return the underlying mount configuration (e.g. overlayfs mounts)
-// instead it always return bind mounts of the temporary mount point. So if the caller
-// needs to inspect the underlying mount configuration (e.g. for optimized differ for
-// overlayfs), this wrapper shouldn't be used.
+//
+//	instead it always returns bind mounts of the temporary mount point. So if the caller
+//	needs to inspect the underlying mount configuration (e.g. for the optimized overlayfs
+//	differ), this wrapper shouldn't be used.
type sharableMountable struct {
snapshot.Mountable
@@ -1631,7 +1631,7 @@ func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr er
// Don't need temporary mount wrapper for non-overlayfs mounts
return mounts, release, nil
}
- dir, err := ioutil.TempDir(sm.mountPoolRoot, "buildkit")
+ dir, err := os.MkdirTemp(sm.mountPoolRoot, "buildkit")
if err != nil {
return nil, nil, err
}
diff --git a/cache/remote.go b/cache/remote.go
index d0ac594b6ac8..b80bd79cfb0e 100644
--- a/cache/remote.go
+++ b/cache/remote.go
@@ -212,8 +212,8 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC
}
}
- if refCfg.Compression.Force {
- if needs, err := needsConversion(ctx, sr.cm.ContentStore, desc, refCfg.Compression.Type); err != nil {
+ if needsForceCompression(ctx, sr.cm.ContentStore, desc, refCfg) {
+ if needs, err := refCfg.Compression.Type.NeedsConversion(ctx, sr.cm.ContentStore, desc); err != nil {
return nil, err
} else if needs {
// ensure the compression type.
@@ -228,13 +228,13 @@ func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refC
newDesc.Size = blobDesc.Size
newDesc.URLs = blobDesc.URLs
newDesc.Annotations = nil
+ if len(addAnnotations) > 0 || len(blobDesc.Annotations) > 0 {
+ newDesc.Annotations = make(map[string]string)
+ }
for _, k := range addAnnotations {
newDesc.Annotations[k] = desc.Annotations[k]
}
for k, v := range blobDesc.Annotations {
- if newDesc.Annotations == nil {
- newDesc.Annotations = make(map[string]string)
- }
newDesc.Annotations[k] = v
}
desc = newDesc
diff --git a/cache/remotecache/azblob/exporter.go b/cache/remotecache/azblob/exporter.go
new file mode 100644
index 000000000000..4d76770db26a
--- /dev/null
+++ b/cache/remotecache/azblob/exporter.go
@@ -0,0 +1,214 @@
+package azblob
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+ "github.com/containerd/containerd/content"
+ "github.com/moby/buildkit/cache/remotecache"
+ v1 "github.com/moby/buildkit/cache/remotecache/v1"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/solver"
+ "github.com/moby/buildkit/util/compression"
+ "github.com/moby/buildkit/util/progress"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+// ResolveCacheExporterFunc for "azblob" cache exporter.
+func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc {
+ return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) {
+ config, err := getConfig(attrs)
+ if err != nil {
+ return nil, errors.WithMessage(err, "failed to create azblob config")
+ }
+
+ containerClient, err := createContainerClient(ctx, config)
+ if err != nil {
+ return nil, errors.WithMessage(err, "failed to create container client")
+ }
+
+ cc := v1.NewCacheChains()
+ return &exporter{
+ CacheExporterTarget: cc,
+ chains: cc,
+ containerClient: containerClient,
+ config: config,
+ }, nil
+ }
+}
+
+var _ remotecache.Exporter = &exporter{}
+
+type exporter struct {
+ solver.CacheExporterTarget
+ chains *v1.CacheChains
+ containerClient *azblob.ContainerClient
+ config *Config
+}
+
+func (ce *exporter) Name() string {
+ return "exporting cache to azure blob store"
+}
+
+func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
+ config, descs, err := ce.chains.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for i, l := range config.Layers {
+ dgstPair, ok := descs[l.Blob]
+ if !ok {
+ return nil, errors.Errorf("missing blob %s", l.Blob)
+ }
+ if dgstPair.Descriptor.Annotations == nil {
+ return nil, errors.Errorf("invalid descriptor without annotations")
+ }
+ var diffID digest.Digest
+ v, ok := dgstPair.Descriptor.Annotations["containerd.io/uncompressed"]
+ if !ok {
+ return nil, errors.Errorf("invalid descriptor without uncompressed annotation")
+ }
+ dgst, err := digest.Parse(v)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse uncompressed annotation")
+ }
+ diffID = dgst
+
+ key := blobKey(ce.config, dgstPair.Descriptor.Digest.String())
+
+ exists, err := blobExists(ctx, ce.containerClient, key)
+ if err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("layers %s exists = %t", key, exists)
+
+ if !exists {
+ layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+ ra, err := dgstPair.Provider.ReaderAt(ctx, dgstPair.Descriptor)
+ if err != nil {
+ err = errors.Wrapf(err, "failed to get reader for %s", dgstPair.Descriptor.Digest)
+ return nil, layerDone(err)
+ }
+ if err := ce.uploadBlobIfNotExists(ctx, key, content.NewReader(ra)); err != nil {
+ return nil, layerDone(err)
+ }
+ layerDone(nil)
+ }
+
+ la := &v1.LayerAnnotations{
+ DiffID: diffID,
+ Size: dgstPair.Descriptor.Size,
+ MediaType: dgstPair.Descriptor.MediaType,
+ }
+ if v, ok := dgstPair.Descriptor.Annotations["buildkit/createdat"]; ok {
+ var t time.Time
+ if err := (&t).UnmarshalText([]byte(v)); err != nil {
+ return nil, err
+ }
+ la.CreatedAt = t.UTC()
+ }
+ config.Layers[i].Annotations = la
+ }
+
+ dt, err := json.Marshal(config)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal config")
+ }
+
+ for _, name := range ce.config.Names {
+ if innerError := ce.uploadManifest(ctx, manifestKey(ce.config, name), bytesToReadSeekCloser(dt)); innerError != nil {
+ return nil, errors.Wrapf(innerError, "error writing manifest %s", name)
+ }
+ }
+
+ return nil, nil
+}
+
+func (ce *exporter) Config() remotecache.Config {
+ return remotecache.Config{
+ Compression: compression.New(compression.Default),
+ }
+}
+
+// For uploading manifests, use the Upload API, which follows "last writer wins" semantics.
+// This is slightly slower than the UploadStream call but is safe to call concurrently from multiple threads. Refer to:
+// https://github.com/Azure/azure-sdk-for-go/issues/18490#issuecomment-1170806877
+func (ce *exporter) uploadManifest(ctx context.Context, manifestKey string, reader io.ReadSeekCloser) error {
+ defer reader.Close()
+ blobClient, err := ce.containerClient.NewBlockBlobClient(manifestKey)
+ if err != nil {
+		return errors.Wrap(err, "error creating block blob client")
+ }
+
+ ctx, cnclFn := context.WithTimeout(ctx, time.Minute*5)
+ defer cnclFn()
+
+ _, err = blobClient.Upload(ctx, reader, &azblob.BlockBlobUploadOptions{})
+ if err != nil {
+		return errors.Wrapf(err, "failed to upload blob %s", manifestKey)
+ }
+
+ return nil
+}
+
+// For uploading blobs, use UploadStream with an access condition so that the upload only happens if the
+// blob does not already exist. Since blobs are content addressable, this is safe and gives
+// a performance improvement over the Upload API used for uploading manifests.
+func (ce *exporter) uploadBlobIfNotExists(ctx context.Context, blobKey string, reader io.Reader) error {
+ blobClient, err := ce.containerClient.NewBlockBlobClient(blobKey)
+ if err != nil {
+		return errors.Wrap(err, "error creating block blob client")
+ }
+
+ uploadCtx, cnclFn := context.WithTimeout(ctx, time.Minute*5)
+ defer cnclFn()
+
+ // Only upload if the blob doesn't exist
+ eTagAny := azblob.ETagAny
+ _, err = blobClient.UploadStream(uploadCtx, reader, azblob.UploadStreamOptions{
+ BufferSize: IOChunkSize,
+ MaxBuffers: IOConcurrency,
+ BlobAccessConditions: &azblob.BlobAccessConditions{
+ ModifiedAccessConditions: &azblob.ModifiedAccessConditions{
+ IfNoneMatch: &eTagAny,
+ },
+ },
+ })
+
+ if err == nil {
+ return nil
+ }
+
+ var se *azblob.StorageError
+ if errors.As(err, &se) && se.ErrorCode == azblob.StorageErrorCodeBlobAlreadyExists {
+ return nil
+ }
+
+	return errors.Wrapf(err, "failed to upload blob %s", blobKey)
+}
+
+var _ io.ReadSeekCloser = &readSeekCloser{}
+
+type readSeekCloser struct {
+ io.Reader
+ io.Seeker
+ io.Closer
+}
+
+func bytesToReadSeekCloser(dt []byte) io.ReadSeekCloser {
+ bytesReader := bytes.NewReader(dt)
+ return &readSeekCloser{
+ Reader: bytesReader,
+ Seeker: bytesReader,
+ Closer: io.NopCloser(bytesReader),
+ }
+}
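
For orientation, a sketch of how this exporter/importer pair would typically be wired into the daemon's resolver maps. The map names below are assumptions that mirror how the other remote cache backends are registered in cmd/buildkitd; the actual wiring lives outside this hunk.

```go
package example // sketch only, not part of this patch

import (
	"github.com/moby/buildkit/cache/remotecache"
	"github.com/moby/buildkit/cache/remotecache/azblob"
)

// Hypothetical registration: the "azblob" cache type maps to the resolver
// funcs added in this file and in importer.go.
var (
	remoteCacheExporters = map[string]remotecache.ResolveCacheExporterFunc{
		"azblob": azblob.ResolveCacheExporterFunc(),
	}
	remoteCacheImporters = map[string]remotecache.ResolveCacheImporterFunc{
		"azblob": azblob.ResolveCacheImporterFunc(),
	}
)
```
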
diff --git a/cache/remotecache/azblob/importer.go b/cache/remotecache/azblob/importer.go
new file mode 100644
index 000000000000..ea10c59f049a
--- /dev/null
+++ b/cache/remotecache/azblob/importer.go
@@ -0,0 +1,239 @@
+package azblob
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+ "github.com/containerd/containerd/content"
+ "github.com/moby/buildkit/cache/remotecache"
+ v1 "github.com/moby/buildkit/cache/remotecache/v1"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/solver"
+ "github.com/moby/buildkit/util/contentutil"
+ "github.com/moby/buildkit/util/progress"
+ "github.com/moby/buildkit/worker"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+ "golang.org/x/sync/errgroup"
+)
+
+// ResolveCacheImporterFunc for "azblob" cache importer.
+func ResolveCacheImporterFunc() remotecache.ResolveCacheImporterFunc {
+ return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) {
+ config, err := getConfig(attrs)
+ if err != nil {
+ return nil, ocispecs.Descriptor{}, errors.WithMessage(err, "failed to create azblob config")
+ }
+
+ containerClient, err := createContainerClient(ctx, config)
+ if err != nil {
+ return nil, ocispecs.Descriptor{}, errors.WithMessage(err, "failed to create container client")
+ }
+
+ importer := &importer{
+ config: config,
+ containerClient: containerClient,
+ }
+
+ return importer, ocispecs.Descriptor{}, nil
+ }
+}
+
+var _ remotecache.Importer = &importer{}
+
+type importer struct {
+ config *Config
+ containerClient *azblob.ContainerClient
+}
+
+func (ci *importer) Resolve(ctx context.Context, _ ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
+ eg, ctx := errgroup.WithContext(ctx)
+ ccs := make([]*v1.CacheChains, len(ci.config.Names))
+
+ for i, name := range ci.config.Names {
+ func(i int, name string) {
+ eg.Go(func() error {
+ cc, err := ci.loadManifest(ctx, name)
+ if err != nil {
+ return errors.Wrapf(err, "failed to load cache manifest %s", name)
+ }
+ ccs[i] = cc
+ return nil
+ })
+ }(i, name)
+ }
+
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+
+ cms := make([]solver.CacheManager, 0, len(ccs))
+
+ for _, cc := range ccs {
+ keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
+ if err != nil {
+ return nil, err
+ }
+ cms = append(cms, solver.NewCacheManager(ctx, id, keysStorage, resultStorage))
+ }
+
+ return solver.NewCombinedCacheManager(cms, nil), nil
+}
+
+func (ci *importer) loadManifest(ctx context.Context, name string) (*v1.CacheChains, error) {
+ key := manifestKey(ci.config, name)
+ exists, err := blobExists(ctx, ci.containerClient, key)
+ if err != nil {
+ return nil, err
+ }
+
+ logrus.Debugf("name %s cache with key %s exists = %v", name, key, exists)
+
+ if !exists {
+ return v1.NewCacheChains(), nil
+ }
+
+ blobClient, err := ci.containerClient.NewBlockBlobClient(key)
+ if err != nil {
+		return nil, errors.Wrap(err, "error creating block blob client")
+ }
+
+ res, err := blobClient.Download(ctx, &azblob.BlobDownloadOptions{})
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+
+ bytes, err := io.ReadAll(res.RawResponse.Body)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+
+ logrus.Debugf("imported config: %s", string(bytes))
+
+ var config v1.CacheConfig
+ if err := json.Unmarshal(bytes, &config); err != nil {
+ return nil, errors.WithStack(err)
+ }
+
+ allLayers := v1.DescriptorProvider{}
+ for _, l := range config.Layers {
+ dpp, err := ci.makeDescriptorProviderPair(l)
+ if err != nil {
+ return nil, err
+ }
+ allLayers[l.Blob] = *dpp
+ }
+
+ progress.OneOff(ctx, fmt.Sprintf("found %d layers in cache", len(allLayers)))(nil)
+
+ cc := v1.NewCacheChains()
+ if err := v1.ParseConfig(config, allLayers, cc); err != nil {
+ return nil, err
+ }
+
+ return cc, nil
+}
+
+func (ci *importer) makeDescriptorProviderPair(l v1.CacheLayer) (*v1.DescriptorProviderPair, error) {
+ if l.Annotations == nil {
+ return nil, errors.Errorf("cache layer with missing annotations")
+ }
+ annotations := map[string]string{}
+ if l.Annotations.DiffID == "" {
+ return nil, errors.Errorf("cache layer with missing diffid")
+ }
+ annotations["containerd.io/uncompressed"] = l.Annotations.DiffID.String()
+ if !l.Annotations.CreatedAt.IsZero() {
+ txt, err := l.Annotations.CreatedAt.MarshalText()
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ annotations["buildkit/createdat"] = string(txt)
+ }
+ desc := ocispecs.Descriptor{
+ MediaType: l.Annotations.MediaType,
+ Digest: l.Blob,
+ Size: l.Annotations.Size,
+ Annotations: annotations,
+ }
+ return &v1.DescriptorProviderPair{
+ Descriptor: desc,
+ Provider: &ciProvider{
+ desc: desc,
+ containerClient: ci.containerClient,
+ Provider: contentutil.FromFetcher(&fetcher{containerClient: ci.containerClient, config: ci.config}),
+ config: ci.config,
+ },
+ }, nil
+}
+
+type fetcher struct {
+ containerClient *azblob.ContainerClient
+ config *Config
+}
+
+func (f *fetcher) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadCloser, error) {
+ key := blobKey(f.config, desc.Digest.String())
+ exists, err := blobExists(ctx, f.containerClient, key)
+ if err != nil {
+ return nil, err
+ }
+
+ if !exists {
+ return nil, errors.Errorf("blob %s not found", desc.Digest)
+ }
+
+ logrus.Debugf("reading layer from cache: %s", key)
+
+ blobClient, err := f.containerClient.NewBlockBlobClient(key)
+ if err != nil {
+ return nil, errors.Wrap(err, "error creating block blob client")
+ }
+
+ res, err := blobClient.Download(ctx, &azblob.BlobDownloadOptions{})
+ if err != nil {
+ return nil, err
+ }
+
+ return res.RawResponse.Body, nil
+}
+
+type ciProvider struct {
+ content.Provider
+ desc ocispecs.Descriptor
+ containerClient *azblob.ContainerClient
+ config *Config
+ checkMutex sync.Mutex
+ checked bool
+}
+
+func (p *ciProvider) CheckDescriptor(ctx context.Context, desc ocispecs.Descriptor) error {
+ if desc.Digest != p.desc.Digest {
+ return nil
+ }
+
+	p.checkMutex.Lock()
+	defer p.checkMutex.Unlock()
+
+	if p.checked {
+		return nil
+	}
+
+ key := blobKey(p.config, desc.Digest.String())
+ exists, err := blobExists(ctx, p.containerClient, key)
+ if err != nil {
+ return err
+ }
+
+ if !exists {
+ return errors.Errorf("blob %s not found", desc.Digest)
+ }
+
+ p.checked = true
+ return nil
+}
diff --git a/cache/remotecache/azblob/utils.go b/cache/remotecache/azblob/utils.go
new file mode 100644
index 000000000000..a993b4a485c8
--- /dev/null
+++ b/cache/remotecache/azblob/utils.go
@@ -0,0 +1,183 @@
+package azblob
+
+import (
+ "context"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+ "github.com/pkg/errors"
+)
+
+const (
+ attrSecretAccessKey = "secret_access_key"
+ attrAccountURL = "account_url"
+ attrPrefix = "prefix"
+ attrManifestsPrefix = "manifests_prefix"
+ attrBlobsPrefix = "blobs_prefix"
+ attrName = "name"
+ attrContainer = "container"
+ IOConcurrency = 4
+ IOChunkSize = 32 * 1024 * 1024
+)
+
+type Config struct {
+ AccountURL string
+ Container string
+ Prefix string
+ ManifestsPrefix string
+ BlobsPrefix string
+ Names []string
+ AccountName string
+ secretAccessKey string
+}
+
+func getConfig(attrs map[string]string) (*Config, error) {
+ accountURLString, ok := attrs[attrAccountURL]
+ if !ok {
+ accountURLString, ok = os.LookupEnv("BUILDKIT_AZURE_STORAGE_ACCOUNT_URL")
+ if !ok {
+ return &Config{}, errors.New("either ${BUILDKIT_AZURE_STORAGE_ACCOUNT_URL} or account_url attribute is required for azblob cache")
+ }
+ }
+
+ accountURL, err := url.Parse(accountURLString)
+ if err != nil {
+ return &Config{}, errors.Wrap(err, "azure storage account url provided is not a valid url")
+ }
+
+ accountName := strings.Split(accountURL.Hostname(), ".")[0]
+
+ container, ok := attrs[attrContainer]
+ if !ok {
+ container, ok = os.LookupEnv("BUILDKIT_AZURE_STORAGE_CONTAINER")
+ if !ok {
+ container = "buildkit-cache"
+ }
+ }
+
+ prefix, ok := attrs[attrPrefix]
+ if !ok {
+ prefix, _ = os.LookupEnv("BUILDKIT_AZURE_STORAGE_PREFIX")
+ }
+
+ manifestsPrefix, ok := attrs[attrManifestsPrefix]
+ if !ok {
+ manifestsPrefix = "manifests"
+ }
+
+ blobsPrefix, ok := attrs[attrBlobsPrefix]
+ if !ok {
+ blobsPrefix = "blobs"
+ }
+
+ names := []string{"buildkit"}
+ name, ok := attrs[attrName]
+ if ok {
+ splittedNames := strings.Split(name, ";")
+ if len(splittedNames) > 0 {
+ names = splittedNames
+ }
+ }
+
+ secretAccessKey := attrs[attrSecretAccessKey]
+
+ config := Config{
+ AccountURL: accountURLString,
+ AccountName: accountName,
+ Container: container,
+ Prefix: prefix,
+ Names: names,
+ ManifestsPrefix: manifestsPrefix,
+ BlobsPrefix: blobsPrefix,
+ secretAccessKey: secretAccessKey,
+ }
+
+ return &config, nil
+}
+
+func createContainerClient(ctx context.Context, config *Config) (*azblob.ContainerClient, error) {
+ var serviceClient *azblob.ServiceClient
+ if config.secretAccessKey != "" {
+ sharedKey, err := azblob.NewSharedKeyCredential(config.AccountName, config.secretAccessKey)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create shared key")
+ }
+ serviceClient, err = azblob.NewServiceClientWithSharedKey(config.AccountURL, sharedKey, &azblob.ClientOptions{})
+ if err != nil {
+			return nil, errors.Wrap(err, "failed to create service client from shared key")
+ }
+ } else {
+ cred, err := azidentity.NewDefaultAzureCredential(nil)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create default azure credentials")
+ }
+
+ serviceClient, err = azblob.NewServiceClient(config.AccountURL, cred, &azblob.ClientOptions{})
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to create service client")
+ }
+ }
+
+ ctx, cnclFn := context.WithTimeout(ctx, time.Second*60)
+ defer cnclFn()
+
+ containerClient, err := serviceClient.NewContainerClient(config.Container)
+ if err != nil {
+ return nil, errors.Wrap(err, "error creating container client")
+ }
+
+ _, err = containerClient.GetProperties(ctx, &azblob.ContainerGetPropertiesOptions{})
+ if err == nil {
+ return containerClient, nil
+ }
+
+ var se *azblob.StorageError
+ if errors.As(err, &se) && se.ErrorCode == azblob.StorageErrorCodeContainerNotFound {
+ ctx, cnclFn := context.WithTimeout(ctx, time.Minute*5)
+ defer cnclFn()
+ _, err := containerClient.Create(ctx, &azblob.ContainerCreateOptions{})
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create cache container %s", config.Container)
+ }
+
+ return containerClient, nil
+ }
+
+ return nil, errors.Wrapf(err, "failed to get properties of cache container %s", config.Container)
+}
+
+func manifestKey(config *Config, name string) string {
+ key := filepath.Join(config.Prefix, config.ManifestsPrefix, name)
+ return key
+}
+
+func blobKey(config *Config, digest string) string {
+ key := filepath.Join(config.Prefix, config.BlobsPrefix, digest)
+ return key
+}
+
+func blobExists(ctx context.Context, containerClient *azblob.ContainerClient, blobKey string) (bool, error) {
+ blobClient, err := containerClient.NewBlobClient(blobKey)
+ if err != nil {
+ return false, errors.Wrap(err, "error creating blob client")
+ }
+
+ ctx, cnclFn := context.WithTimeout(ctx, time.Second*60)
+ defer cnclFn()
+ _, err = blobClient.GetProperties(ctx, &azblob.BlobGetPropertiesOptions{})
+ if err == nil {
+ return true, nil
+ }
+
+ var se *azblob.StorageError
+ if errors.As(err, &se) && se.ErrorCode == azblob.StorageErrorCodeBlobNotFound {
+ return false, nil
+ }
+
+ return false, errors.Wrapf(err, "failed to check blob %s existence", blobKey)
+}
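
A sketch of the attribute map that `getConfig` expects, as it would arrive from a cache export/import request. The values are illustrative; only `account_url` (or the `BUILDKIT_AZURE_STORAGE_ACCOUNT_URL` environment variable) is required, everything else falls back to the defaults handled above.

```go
// Illustrative attribute map (values are made up).
attrs := map[string]string{
	"account_url": "https://myaccount.blob.core.windows.net",
	"container":   "buildkit-cache", // default if omitted
	"name":        "main;dev",       // ';'-separated list of manifest names
}
cfg, err := getConfig(attrs) // cfg.AccountName == "myaccount", derived from the URL host
```
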
diff --git a/cache/remotecache/export.go b/cache/remotecache/export.go
index 1c3a240cfc19..a0fd7ba7e202 100644
--- a/cache/remotecache/export.go
+++ b/cache/remotecache/export.go
@@ -5,7 +5,6 @@ import (
"context"
"encoding/json"
"fmt"
- "time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
@@ -24,24 +23,10 @@ import (
type ResolveCacheExporterFunc func(ctx context.Context, g session.Group, attrs map[string]string) (Exporter, error)
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
- pw, _, _ := progress.NewFromContext(ctx)
- now := time.Now()
- st := progress.Status{
- Started: &now,
- }
- pw.Write(id, st)
- return func(err error) error {
- now := time.Now()
- st.Completed = &now
- pw.Write(id, st)
- pw.Close()
- return err
- }
-}
-
type Exporter interface {
solver.CacheExporterTarget
+ // Name uniquely identifies the exporter
+ Name() string
// Finalize finalizes and return metadata that are returned to the client
// e.g. ExporterResponseManifestDesc
Finalize(ctx context.Context) (map[string]string, error)
@@ -72,6 +57,10 @@ func NewExporter(ingester content.Ingester, ref string, oci bool, compressionCon
return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, ref: ref, comp: compressionConfig}
}
+func (ce *contentCacheExporter) Name() string {
+ return "exporting content cache"
+}
+
func (ce *contentCacheExporter) Config() Config {
return Config{
Compression: ce.comp,
@@ -107,7 +96,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
if !ok {
return nil, errors.Errorf("missing blob %s", l.Blob)
}
- layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+ layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob))
if err := contentutil.Copy(ctx, ce.ingester, dgstPair.Provider, dgstPair.Descriptor, ce.ref, logs.LoggerFromContext(ctx)); err != nil {
return nil, layerDone(errors.Wrap(err, "error writing layer blob"))
}
@@ -127,7 +116,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
Size: int64(len(dt)),
MediaType: v1.CacheConfigMediaTypeV0,
}
- configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
+ configDone := progress.OneOff(ctx, fmt.Sprintf("writing config %s", dgst))
if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return nil, configDone(errors.Wrap(err, "error writing config blob"))
}
@@ -146,7 +135,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string
Size: int64(len(dt)),
MediaType: mfst.MediaType,
}
- mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
+ mfstDone := progress.OneOff(ctx, fmt.Sprintf("writing manifest %s", dgst))
if err := content.WriteBlob(ctx, ce.ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return nil, mfstDone(errors.Wrap(err, "error writing manifest blob"))
}
diff --git a/cache/remotecache/gha/gha.go b/cache/remotecache/gha/gha.go
index cefcf5ce1b98..f36693d3b08d 100644
--- a/cache/remotecache/gha/gha.go
+++ b/cache/remotecache/gha/gha.go
@@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "io"
"os"
"sync"
"time"
@@ -90,6 +91,10 @@ func NewExporter(c *Config) (remotecache.Exporter, error) {
return &exporter{CacheExporterTarget: cc, chains: cc, cache: cache, config: c}, nil
}
+func (*exporter) Name() string {
+ return "exporting to GitHub cache"
+}
+
func (ce *exporter) Config() remotecache.Config {
return remotecache.Config{
Compression: compression.New(compression.Default),
@@ -144,7 +149,7 @@ func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
return nil, err
}
if b == nil {
- layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+ layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob))
ra, err := dgstPair.Provider.ReaderAt(ctx, dgstPair.Descriptor)
if err != nil {
return nil, layerDone(err)
@@ -367,22 +372,13 @@ type readerAt struct {
desc ocispecs.Descriptor
}
-func (r *readerAt) Size() int64 {
- return r.desc.Size
+func (r *readerAt) ReadAt(p []byte, off int64) (int, error) {
+ if off >= r.desc.Size {
+ return 0, io.EOF
+ }
+ return r.ReaderAtCloser.ReadAt(p, off)
}
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
- pw, _, _ := progress.NewFromContext(ctx)
- now := time.Now()
- st := progress.Status{
- Started: &now,
- }
- pw.Write(id, st)
- return func(err error) error {
- now := time.Now()
- st.Completed = &now
- pw.Write(id, st)
- pw.Close()
- return err
- }
+func (r *readerAt) Size() int64 {
+ return r.desc.Size
}
diff --git a/cache/remotecache/inline/inline.go b/cache/remotecache/inline/inline.go
index cf11db49596c..036ec059f76e 100644
--- a/cache/remotecache/inline/inline.go
+++ b/cache/remotecache/inline/inline.go
@@ -30,6 +30,10 @@ type exporter struct {
chains *v1.CacheChains
}
+func (*exporter) Name() string {
+ return "exporting inline cache"
+}
+
func (ce *exporter) Config() remotecache.Config {
return remotecache.Config{
Compression: compression.New(compression.Default),
@@ -52,16 +56,20 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest)
return nil, err
}
+ layerBlobDigests := make([]digest.Digest, len(layers))
+
descs2 := map[digest.Digest]v1.DescriptorProviderPair{}
- for _, k := range layers {
+ for i, k := range layers {
if v, ok := descs[k]; ok {
descs2[k] = v
+ layerBlobDigests[i] = k
continue
}
// fallback for uncompressed digests
for _, v := range descs {
if uc := v.Descriptor.Annotations["containerd.io/uncompressed"]; uc == string(k) {
descs2[v.Descriptor.Digest] = v
+ layerBlobDigests[i] = v.Descriptor.Digest
}
}
}
@@ -83,7 +91,7 @@ func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest)
// reorder layers based on the order in the image
blobIndexes := make(map[digest.Digest]int, len(layers))
- for i, blob := range layers {
+ for i, blob := range layerBlobDigests {
blobIndexes[blob] = i
}
diff --git a/cache/remotecache/local/local.go b/cache/remotecache/local/local.go
index 18c73364c03b..7f3d83b70f49 100644
--- a/cache/remotecache/local/local.go
+++ b/cache/remotecache/local/local.go
@@ -98,15 +98,28 @@ func getContentStore(ctx context.Context, sm *session.Manager, g session.Group,
if err != nil {
return nil, err
}
- return sessioncontent.NewCallerStore(caller, storeID), nil
+ return &unlazyProvider{sessioncontent.NewCallerStore(caller, storeID), g}, nil
+}
+
+type unlazyProvider struct {
+ content.Store
+ s session.Group
+}
+
+func (p *unlazyProvider) UnlazySession(desc ocispecs.Descriptor) session.Group {
+ return p.s
}
func attrsToCompression(attrs map[string]string) (*compression.Config, error) {
- compressionType := compression.Default
+ var compressionType compression.Type
if v, ok := attrs[attrLayerCompression]; ok {
- if c := compression.Parse(v); c != compression.UnknownCompression {
- compressionType = c
+ c, err := compression.Parse(v)
+ if err != nil {
+ return nil, err
}
+ compressionType = c
+ } else {
+ compressionType = compression.Default
}
compressionConfig := compression.New(compressionType)
if v, ok := attrs[attrForceCompression]; ok {
diff --git a/cache/remotecache/registry/registry.go b/cache/remotecache/registry/registry.go
index cfe54e52aa6e..e3b32eb29657 100644
--- a/cache/remotecache/registry/registry.go
+++ b/cache/remotecache/registry/registry.go
@@ -131,11 +131,15 @@ func (dsl *withDistributionSourceLabel) SnapshotLabels(descs []ocispecs.Descript
}
func attrsToCompression(attrs map[string]string) (*compression.Config, error) {
- compressionType := compression.Default
+ var compressionType compression.Type
if v, ok := attrs[attrLayerCompression]; ok {
- if c := compression.Parse(v); c != compression.UnknownCompression {
- compressionType = c
+ c, err := compression.Parse(v)
+ if err != nil {
+ return nil, err
}
+ compressionType = c
+ } else {
+ compressionType = compression.Default
}
compressionConfig := compression.New(compressionType)
if v, ok := attrs[attrForceCompression]; ok {
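
Both the local and registry backends now share the same stricter parsing behaviour: an unrecognised compression attribute is a hard error instead of silently falling back to the default. A small sketch of the new behaviour; the helper name and the `"compression"` attribute key are assumptions mirroring `attrsToCompression` above.

```go
// Hypothetical helper: unknown compression names now fail fast, where the
// callers previously ignored them and used compression.Default.
func compressionFromAttrs(attrs map[string]string) (compression.Type, error) {
	v, ok := attrs["compression"]
	if !ok {
		return compression.Default, nil
	}
	// e.g. "gzip", "estargz", "zstd", "uncompressed"; anything else returns an error.
	return compression.Parse(v)
}
```
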
diff --git a/cache/remotecache/s3/readerat.go b/cache/remotecache/s3/readerat.go
new file mode 100644
index 000000000000..666606817ec4
--- /dev/null
+++ b/cache/remotecache/s3/readerat.go
@@ -0,0 +1,75 @@
+package s3
+
+import (
+ "io"
+)
+
+type ReaderAtCloser interface {
+ io.ReaderAt
+ io.Closer
+}
+
+type readerAtCloser struct {
+ offset int64
+ rc io.ReadCloser
+ ra io.ReaderAt
+ open func(offset int64) (io.ReadCloser, error)
+ closed bool
+}
+
+func toReaderAtCloser(open func(offset int64) (io.ReadCloser, error)) ReaderAtCloser {
+ return &readerAtCloser{
+ open: open,
+ }
+}
+
+func (hrs *readerAtCloser) ReadAt(p []byte, off int64) (n int, err error) {
+ if hrs.closed {
+ return 0, io.EOF
+ }
+
+ if hrs.ra != nil {
+ return hrs.ra.ReadAt(p, off)
+ }
+
+ if hrs.rc == nil || off != hrs.offset {
+ if hrs.rc != nil {
+ hrs.rc.Close()
+ hrs.rc = nil
+ }
+ rc, err := hrs.open(off)
+ if err != nil {
+ return 0, err
+ }
+ hrs.rc = rc
+ }
+ if ra, ok := hrs.rc.(io.ReaderAt); ok {
+ hrs.ra = ra
+ n, err = ra.ReadAt(p, off)
+ } else {
+ for {
+ var nn int
+ nn, err = hrs.rc.Read(p)
+ n += nn
+ p = p[nn:]
+			if len(p) == 0 || err != nil {
+ break
+ }
+ }
+ }
+
+ hrs.offset += int64(n)
+ return
+}
+
+func (hrs *readerAtCloser) Close() error {
+ if hrs.closed {
+ return nil
+ }
+ hrs.closed = true
+ if hrs.rc != nil {
+ return hrs.rc.Close()
+ }
+
+ return nil
+}
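
The adapter above turns a re-openable stream into an `io.ReaderAt`: sequential reads reuse the open stream, while a read at a different offset closes it and re-opens from the requested position. A minimal usage sketch; `openAt` is a hypothetical callback (in this package the callback is backed by `s3Client.getReader`).

```go
// Sketch only: wrap an offset-aware opener as a ReaderAt.
ra := toReaderAtCloser(func(offset int64) (io.ReadCloser, error) {
	return openAt(offset) // hypothetical helper returning a fresh stream at offset
})
defer ra.Close()

buf := make([]byte, 1024)
n, err := ra.ReadAt(buf, 0) // sequential ReadAt calls reuse the same underlying stream
```
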
diff --git a/cache/remotecache/s3/s3.go b/cache/remotecache/s3/s3.go
new file mode 100644
index 000000000000..f0e814b2f6e9
--- /dev/null
+++ b/cache/remotecache/s3/s3.go
@@ -0,0 +1,476 @@
+package s3
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ aws_config "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/credentials"
+ "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
+ "github.com/aws/aws-sdk-go-v2/service/s3"
+ "github.com/aws/smithy-go"
+ "github.com/containerd/containerd/content"
+ "github.com/moby/buildkit/cache/remotecache"
+ v1 "github.com/moby/buildkit/cache/remotecache/v1"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/solver"
+ "github.com/moby/buildkit/util/compression"
+ "github.com/moby/buildkit/util/progress"
+ "github.com/moby/buildkit/worker"
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+const (
+ attrBucket = "bucket"
+ attrRegion = "region"
+ attrPrefix = "prefix"
+ attrManifestsPrefix = "manifests_prefix"
+ attrBlobsPrefix = "blobs_prefix"
+ attrName = "name"
+ attrTouchRefresh = "touch_refresh"
+ attrEndpointURL = "endpoint_url"
+ attrAccessKeyID = "access_key_id"
+ attrSecretAccessKey = "secret_access_key"
+ attrSessionToken = "session_token"
+ attrUsePathStyle = "use_path_style"
+)
+
+type Config struct {
+ Bucket string
+ Region string
+ Prefix string
+ ManifestsPrefix string
+ BlobsPrefix string
+ Names []string
+ TouchRefresh time.Duration
+ EndpointURL string
+ AccessKeyID string
+ SecretAccessKey string
+ SessionToken string
+ UsePathStyle bool
+}
+
+func getConfig(attrs map[string]string) (Config, error) {
+ bucket, ok := attrs[attrBucket]
+ if !ok {
+ bucket, ok = os.LookupEnv("AWS_BUCKET")
+ if !ok {
+ return Config{}, errors.Errorf("bucket ($AWS_BUCKET) not set for s3 cache")
+ }
+ }
+
+ region, ok := attrs[attrRegion]
+ if !ok {
+ region, ok = os.LookupEnv("AWS_REGION")
+ if !ok {
+ return Config{}, errors.Errorf("region ($AWS_REGION) not set for s3 cache")
+ }
+ }
+
+ prefix := attrs[attrPrefix]
+
+ manifestsPrefix, ok := attrs[attrManifestsPrefix]
+ if !ok {
+ manifestsPrefix = "manifests/"
+ }
+
+ blobsPrefix, ok := attrs[attrBlobsPrefix]
+ if !ok {
+ blobsPrefix = "blobs/"
+ }
+
+ names := []string{"buildkit"}
+ name, ok := attrs[attrName]
+ if ok {
+ splittedNames := strings.Split(name, ";")
+ if len(splittedNames) > 0 {
+ names = splittedNames
+ }
+ }
+
+ touchRefresh := 24 * time.Hour
+
+ touchRefreshStr, ok := attrs[attrTouchRefresh]
+ if ok {
+ touchRefreshFromUser, err := time.ParseDuration(touchRefreshStr)
+ if err == nil {
+ touchRefresh = touchRefreshFromUser
+ }
+ }
+
+ endpointURL := attrs[attrEndpointURL]
+ accessKeyID := attrs[attrAccessKeyID]
+ secretAccessKey := attrs[attrSecretAccessKey]
+ sessionToken := attrs[attrSessionToken]
+
+ usePathStyle := false
+ usePathStyleStr, ok := attrs[attrUsePathStyle]
+ if ok {
+ usePathStyleUser, err := strconv.ParseBool(usePathStyleStr)
+ if err == nil {
+ usePathStyle = usePathStyleUser
+ }
+ }
+
+ return Config{
+ Bucket: bucket,
+ Region: region,
+ Prefix: prefix,
+ ManifestsPrefix: manifestsPrefix,
+ BlobsPrefix: blobsPrefix,
+ Names: names,
+ TouchRefresh: touchRefresh,
+ EndpointURL: endpointURL,
+ AccessKeyID: accessKeyID,
+ SecretAccessKey: secretAccessKey,
+ SessionToken: sessionToken,
+ UsePathStyle: usePathStyle,
+ }, nil
+}
+
+// ResolveCacheExporterFunc for s3 cache exporter.
+func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc {
+ return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) {
+ config, err := getConfig(attrs)
+ if err != nil {
+ return nil, err
+ }
+
+ s3Client, err := newS3Client(ctx, config)
+ if err != nil {
+ return nil, err
+ }
+ cc := v1.NewCacheChains()
+ return &exporter{CacheExporterTarget: cc, chains: cc, s3Client: s3Client, config: config}, nil
+ }
+}
+
+type exporter struct {
+ solver.CacheExporterTarget
+ chains *v1.CacheChains
+ s3Client *s3Client
+ config Config
+}
+
+func (*exporter) Name() string {
+ return "exporting cache to s3"
+}
+
+func (e *exporter) Config() remotecache.Config {
+ return remotecache.Config{
+ Compression: compression.New(compression.Default),
+ }
+}
+
+func (e *exporter) Finalize(ctx context.Context) (map[string]string, error) {
+ cacheConfig, descs, err := e.chains.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ for i, l := range cacheConfig.Layers {
+ dgstPair, ok := descs[l.Blob]
+ if !ok {
+ return nil, errors.Errorf("missing blob %s", l.Blob)
+ }
+ if dgstPair.Descriptor.Annotations == nil {
+ return nil, errors.Errorf("invalid descriptor without annotations")
+ }
+ v, ok := dgstPair.Descriptor.Annotations["containerd.io/uncompressed"]
+ if !ok {
+ return nil, errors.Errorf("invalid descriptor without uncompressed annotation")
+ }
+ diffID, err := digest.Parse(v)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse uncompressed annotation")
+ }
+
+ key := e.s3Client.blobKey(dgstPair.Descriptor.Digest)
+ exists, err := e.s3Client.exists(ctx, key)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to check file presence in cache")
+ }
+ if exists != nil {
+ if time.Since(*exists) > e.config.TouchRefresh {
+ err = e.s3Client.touch(ctx, key)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to touch file")
+ }
+ }
+ } else {
+ layerDone := progress.OneOff(ctx, fmt.Sprintf("writing layer %s", l.Blob))
+ dt, err := content.ReadBlob(ctx, dgstPair.Provider, dgstPair.Descriptor)
+ if err != nil {
+ return nil, layerDone(err)
+ }
+ if err := e.s3Client.saveMutable(ctx, key, dt); err != nil {
+ return nil, layerDone(errors.Wrap(err, "error writing layer blob"))
+ }
+ layerDone(nil)
+ }
+
+ la := &v1.LayerAnnotations{
+ DiffID: diffID,
+ Size: dgstPair.Descriptor.Size,
+ MediaType: dgstPair.Descriptor.MediaType,
+ }
+ if v, ok := dgstPair.Descriptor.Annotations["buildkit/createdat"]; ok {
+ var t time.Time
+ if err := (&t).UnmarshalText([]byte(v)); err != nil {
+ return nil, err
+ }
+ la.CreatedAt = t.UTC()
+ }
+ cacheConfig.Layers[i].Annotations = la
+ }
+
+ dt, err := json.Marshal(cacheConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, name := range e.config.Names {
+ if err := e.s3Client.saveMutable(ctx, e.s3Client.manifestKey(name), dt); err != nil {
+ return nil, errors.Wrapf(err, "error writing manifest: %s", name)
+ }
+ }
+ return nil, nil
+}
+
+// ResolveCacheImporterFunc for s3 cache importer.
+func ResolveCacheImporterFunc() remotecache.ResolveCacheImporterFunc {
+ return func(ctx context.Context, _ session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) {
+ config, err := getConfig(attrs)
+ if err != nil {
+ return nil, ocispecs.Descriptor{}, err
+ }
+ s3Client, err := newS3Client(ctx, config)
+ if err != nil {
+ return nil, ocispecs.Descriptor{}, err
+ }
+ return &importer{s3Client, config}, ocispecs.Descriptor{}, nil
+ }
+}
+
+type importer struct {
+ s3Client *s3Client
+ config Config
+}
+
+func (i *importer) makeDescriptorProviderPair(l v1.CacheLayer) (*v1.DescriptorProviderPair, error) {
+ if l.Annotations == nil {
+ return nil, errors.Errorf("cache layer with missing annotations")
+ }
+ if l.Annotations.DiffID == "" {
+ return nil, errors.Errorf("cache layer with missing diffid")
+ }
+ annotations := map[string]string{}
+ annotations["containerd.io/uncompressed"] = l.Annotations.DiffID.String()
+ if !l.Annotations.CreatedAt.IsZero() {
+ txt, err := l.Annotations.CreatedAt.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ annotations["buildkit/createdat"] = string(txt)
+ }
+ return &v1.DescriptorProviderPair{
+ Provider: i.s3Client,
+ Descriptor: ocispecs.Descriptor{
+ MediaType: l.Annotations.MediaType,
+ Digest: l.Blob,
+ Size: l.Annotations.Size,
+ Annotations: annotations,
+ },
+ }, nil
+}
+
+func (i *importer) load(ctx context.Context) (*v1.CacheChains, error) {
+ var config v1.CacheConfig
+ found, err := i.s3Client.getManifest(ctx, i.s3Client.manifestKey(i.config.Names[0]), &config)
+ if err != nil {
+ return nil, err
+ }
+ if !found {
+ return v1.NewCacheChains(), nil
+ }
+
+ allLayers := v1.DescriptorProvider{}
+
+ for _, l := range config.Layers {
+ dpp, err := i.makeDescriptorProviderPair(l)
+ if err != nil {
+ return nil, err
+ }
+ allLayers[l.Blob] = *dpp
+ }
+
+ cc := v1.NewCacheChains()
+ if err := v1.ParseConfig(config, allLayers, cc); err != nil {
+ return nil, err
+ }
+ return cc, nil
+}
+
+func (i *importer) Resolve(ctx context.Context, _ ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
+ cc, err := i.load(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
+ if err != nil {
+ return nil, err
+ }
+
+ return solver.NewCacheManager(ctx, id, keysStorage, resultStorage), nil
+}
+
+type readerAt struct {
+ ReaderAtCloser
+ size int64
+}
+
+func (r *readerAt) Size() int64 {
+ return r.size
+}
+
+type s3Client struct {
+ *s3.Client
+ *manager.Uploader
+ bucket string
+ prefix string
+ blobsPrefix string
+ manifestsPrefix string
+}
+
+func newS3Client(ctx context.Context, config Config) (*s3Client, error) {
+ cfg, err := aws_config.LoadDefaultConfig(ctx, aws_config.WithRegion(config.Region))
+ if err != nil {
+		return nil, errors.Wrap(err, "unable to load AWS SDK config")
+ }
+ client := s3.NewFromConfig(cfg, func(options *s3.Options) {
+ if config.AccessKeyID != "" && config.SecretAccessKey != "" {
+ options.Credentials = credentials.NewStaticCredentialsProvider(config.AccessKeyID, config.SecretAccessKey, config.SessionToken)
+ }
+ if config.EndpointURL != "" {
+ options.UsePathStyle = config.UsePathStyle
+ options.EndpointResolver = s3.EndpointResolverFromURL(config.EndpointURL)
+ }
+ })
+
+ return &s3Client{
+ Client: client,
+ Uploader: manager.NewUploader(client),
+ bucket: config.Bucket,
+ prefix: config.Prefix,
+ blobsPrefix: config.BlobsPrefix,
+ manifestsPrefix: config.ManifestsPrefix,
+ }, nil
+}
+
+func (s3Client *s3Client) getManifest(ctx context.Context, key string, config *v1.CacheConfig) (bool, error) {
+ input := &s3.GetObjectInput{
+ Bucket: &s3Client.bucket,
+ Key: &key,
+ }
+
+ output, err := s3Client.GetObject(ctx, input)
+ if err != nil {
+ if isNotFound(err) {
+ return false, nil
+ }
+ return false, err
+ }
+ defer output.Body.Close()
+
+ decoder := json.NewDecoder(output.Body)
+ if err := decoder.Decode(config); err != nil {
+ return false, errors.WithStack(err)
+ }
+
+ return true, nil
+}
+
+func (s3Client *s3Client) getReader(ctx context.Context, key string) (io.ReadCloser, error) {
+ input := &s3.GetObjectInput{
+ Bucket: &s3Client.bucket,
+ Key: &key,
+ }
+
+ output, err := s3Client.GetObject(ctx, input)
+ if err != nil {
+ return nil, err
+ }
+ return output.Body, nil
+}
+
+func (s3Client *s3Client) saveMutable(ctx context.Context, key string, value []byte) error {
+ input := &s3.PutObjectInput{
+ Bucket: &s3Client.bucket,
+ Key: &key,
+
+ Body: bytes.NewReader(value),
+ }
+ _, err := s3Client.Upload(ctx, input)
+ return err
+}
+
+func (s3Client *s3Client) exists(ctx context.Context, key string) (*time.Time, error) {
+ input := &s3.HeadObjectInput{
+ Bucket: &s3Client.bucket,
+ Key: &key,
+ }
+
+ head, err := s3Client.HeadObject(ctx, input)
+ if err != nil {
+ if isNotFound(err) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return head.LastModified, nil
+}
+
+func (s3Client *s3Client) touch(ctx context.Context, key string) error {
+ copySource := fmt.Sprintf("%s/%s", s3Client.bucket, key)
+ cp := &s3.CopyObjectInput{
+ Bucket: &s3Client.bucket,
+ CopySource: ©Source,
+ Key: &key,
+ Metadata: map[string]string{"updated-at": time.Now().String()},
+ MetadataDirective: "REPLACE",
+ }
+
+ _, err := s3Client.CopyObject(ctx, cp)
+
+ return err
+}
+
+func (s3Client *s3Client) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) {
+ readerAtCloser := toReaderAtCloser(func(offset int64) (io.ReadCloser, error) {
+ return s3Client.getReader(ctx, s3Client.blobKey(desc.Digest))
+ })
+ return &readerAt{ReaderAtCloser: readerAtCloser, size: desc.Size}, nil
+}
+
+func (s3Client *s3Client) manifestKey(name string) string {
+ return s3Client.prefix + s3Client.manifestsPrefix + name
+}
+
+func (s3Client *s3Client) blobKey(dgst digest.Digest) string {
+ return s3Client.prefix + s3Client.blobsPrefix + dgst.String()
+}
+
+func isNotFound(err error) bool {
+ var errapi smithy.APIError
+ return errors.As(err, &errapi) && (errapi.ErrorCode() == "NoSuchKey" || errapi.ErrorCode() == "NotFound")
+}
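
As with the azblob backend, configuration is attribute-driven. A sketch of a typical attribute map for `getConfig`; the values are illustrative, and `bucket`/`region` may alternatively come from `$AWS_BUCKET` / `$AWS_REGION`.

```go
// Illustrative attribute map (values are made up).
attrs := map[string]string{
	"bucket":        "my-buildkit-cache",
	"region":        "eu-west-1",
	"prefix":        "ci/",
	"name":          "main;feature-x", // ';'-separated list of manifest names
	"touch_refresh": "48h",            // parsed with time.ParseDuration, default 24h
}
cfg, err := getConfig(attrs)
```
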
diff --git a/cache/remotecache/v1/cachestorage.go b/cache/remotecache/v1/cachestorage.go
index 7ba7eb0f6059..a4f7f6ad055f 100644
--- a/cache/remotecache/v1/cachestorage.go
+++ b/cache/remotecache/v1/cachestorage.go
@@ -276,7 +276,7 @@ func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheR
// Any of blobs in the remote must meet the specified compression option.
match := false
for _, desc := range r.result.Descriptors {
- m := compressionopts.Type.IsMediaType(desc.MediaType)
+ m := compression.IsMediaType(compressionopts.Type, desc.MediaType)
match = match || m
if compressionopts.Force && !m {
match = false
diff --git a/cache/remotecache/v1/chains.go b/cache/remotecache/v1/chains.go
index 306e037f7f1c..8c8bbde5dc76 100644
--- a/cache/remotecache/v1/chains.go
+++ b/cache/remotecache/v1/chains.go
@@ -146,7 +146,7 @@ func (c *item) removeLink(src *item) bool {
return found
}
-func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
+func (c *item) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *solver.Remote) {
c.resultTime = createdAt
c.result = result
}
@@ -214,7 +214,7 @@ func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}
type nopRecord struct {
}
-func (c *nopRecord) AddResult(createdAt time.Time, result *solver.Remote) {
+func (c *nopRecord) AddResult(_ digest.Digest, _ int, createdAt time.Time, result *solver.Remote) {
}
func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
diff --git a/cache/remotecache/v1/chains_test.go b/cache/remotecache/v1/chains_test.go
index 7e2a2f525769..5e7bcd0691c7 100644
--- a/cache/remotecache/v1/chains_test.go
+++ b/cache/remotecache/v1/chains_test.go
@@ -29,7 +29,7 @@ func TestSimpleMarshal(t *testing.T) {
Digest: dgst("d1"),
}},
}
- baz.AddResult(time.Now(), r0)
+ baz.AddResult("", 0, time.Now(), r0)
}
addRecords()
diff --git a/cache/remotecache/v1/doc.go b/cache/remotecache/v1/doc.go
index 97d21a452068..a1b00d86f68f 100644
--- a/cache/remotecache/v1/doc.go
+++ b/cache/remotecache/v1/doc.go
@@ -1,6 +1,6 @@
package cacheimport
-// Distibutable build cache
+// Distributable build cache
//
// Main manifest is OCI image index
// https://github.com/opencontainers/image-spec/blob/master/image-index.md .
@@ -13,7 +13,7 @@ package cacheimport
// Cache config file layout:
//
//{
-// "layers": [
+// "layers": [ <- layers contains references to blobs
// {
// "blob": "sha256:deadbeef", <- digest of layer blob in index
// "parent": -1 <- index of parent layer, -1 if no parent
@@ -24,20 +24,26 @@ package cacheimport
// }
// ],
//
-// "records": [
+// "records": [ <- records contains chains of cache keys
// {
// "digest": "sha256:deadbeef", <- base digest for the record
// },
// {
// "digest": "sha256:deadbeef",
// "output": 1, <- optional output index
-// "layers": [ <- optional array or layer chains
+// "layers": [ <- optional array of layer pointers
// {
// "createdat": "",
-// "layer": 1, <- index to the layer
+// "layer": 1, <- index to the layers array, layer is loaded with all of its parents
// }
// ],
-// "inputs": [ <- dependant records
+// "chains": [ <- optional array of layer pointer lists
+// {
+// "createdat": "",
+// "layers": [1], <- indexes to the layers array, all layers are loaded in specified order without parents
+// }
+// ],
+// "inputs": [ <- dependant records, this is how cache keys are linked together
// [ <- index of the dependency (0)
// {
// "selector": "sel", <- optional selector
diff --git a/cache/remotecache/v1/parse.go b/cache/remotecache/v1/parse.go
index 65a6e441f575..3c8294a602c0 100644
--- a/cache/remotecache/v1/parse.go
+++ b/cache/remotecache/v1/parse.go
@@ -61,7 +61,7 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.
return nil, err
}
if remote != nil {
- r.AddResult(res.CreatedAt, remote)
+ r.AddResult("", 0, res.CreatedAt, remote)
}
}
@@ -86,7 +86,7 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.
}
if remote != nil {
remote.Provider = mp
- r.AddResult(res.CreatedAt, remote)
+ r.AddResult("", 0, res.CreatedAt, remote)
}
}
diff --git a/cache/util/fsutil.go b/cache/util/fsutil.go
index b425a002a542..e90ed45f77f4 100644
--- a/cache/util/fsutil.go
+++ b/cache/util/fsutil.go
@@ -3,7 +3,6 @@ package util
import (
"context"
"io"
- "io/ioutil"
"os"
"path/filepath"
@@ -59,7 +58,7 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([
}
if req.Range == nil {
- dt, err = ioutil.ReadFile(fp)
+ dt, err = os.ReadFile(fp)
if err != nil {
return errors.WithStack(err)
}
@@ -68,7 +67,7 @@ func ReadFile(ctx context.Context, mount snapshot.Mountable, req ReadRequest) ([
if err != nil {
return errors.WithStack(err)
}
- dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
+ dt, err = io.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
f.Close()
if err != nil {
return errors.WithStack(err)
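For the ranged branch, `io.NewSectionReader` plus `io.ReadAll` reads exactly `req.Range.Length` bytes starting at `req.Range.Offset`. A small sketch of the same pattern outside the snapshot mount (the path is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	// placeholder file, standing in for the resolved path inside the mount
	f, err := os.Open("/etc/hosts")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// equivalent of req.Range.Offset = 0, req.Range.Length = 9
	dt, err := io.ReadAll(io.NewSectionReader(f, 0, 9))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", dt)
}
```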
diff --git a/client/build.go b/client/build.go
index 25b3aa6d7ccf..2a4bc9e105d1 100644
--- a/client/build.go
+++ b/client/build.go
@@ -20,17 +20,14 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
}
}()
- if opt.Frontend != "" {
- return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend")
- }
+ feOpts := opt.FrontendAttrs
+
+ opt.Frontend = ""
if product == "" {
product = apicaps.ExportedProduct
}
- feOpts := opt.FrontendAttrs
- opt.FrontendAttrs = nil
-
workers, err := c.ListWorkers(ctx)
if err != nil {
return nil, errors.Wrap(err, "listing workers for Build")
@@ -113,6 +110,19 @@ func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.Sta
return g.gateway.StatFile(ctx, in, opts...)
}
+func (g *gatewayClientForBuild) Evaluate(ctx context.Context, in *gatewayapi.EvaluateRequest, opts ...grpc.CallOption) (*gatewayapi.EvaluateResponse, error) {
+ if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil {
+ if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil {
+ return nil, err
+ }
+ ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+ _, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...)
+ return &gatewayapi.EvaluateResponse{}, err
+ }
+ ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
+ return g.gateway.Evaluate(ctx, in, opts...)
+}
+
func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Ping(ctx, in, opts...)
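On the caller side this surfaces through `SolveRequest.Evaluate`, as the updated tests below show. A rough sketch of a `Build` callback that forces evaluation of a definition, assuming a reachable buildkitd at the default socket (the address is a placeholder):

```go
package main

import (
	"context"
	"log"

	bkclient "github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
)

func main() {
	ctx := context.Background()
	c, err := bkclient.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	buildFunc := func(ctx context.Context, gc gateway.Client) (*gateway.Result, error) {
		def, err := llb.Image("busybox:latest").Marshal(ctx)
		if err != nil {
			return nil, err
		}
		// Evaluate asks the gateway to fully solve the result; against older
		// daemons the client shim above falls back to a StatFile on ".".
		return gc.Solve(ctx, gateway.SolveRequest{
			Definition: def.ToPB(),
			Evaluate:   true,
		})
	}

	if _, err := c.Build(ctx, bkclient.SolveOpt{}, "", buildFunc, nil); err != nil {
		log.Fatal(err)
	}
}
```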
diff --git a/client/build_test.go b/client/build_test.go
index 58fa5f4684cb..1376c1515ae9 100644
--- a/client/build_test.go
+++ b/client/build_test.go
@@ -3,11 +3,8 @@ package client
import (
"bytes"
"context"
- "encoding/base64"
- "encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strconv"
@@ -17,7 +14,6 @@ import (
"time"
"github.com/moby/buildkit/client/llb"
- "github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/frontend/gateway/client"
gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
@@ -26,7 +22,6 @@ import (
"github.com/moby/buildkit/session/sshforward/sshprovider"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb"
- binfotypes "github.com/moby/buildkit/util/buildinfo/types"
"github.com/moby/buildkit/util/entitlements"
utilsystem "github.com/moby/buildkit/util/system"
"github.com/moby/buildkit/util/testutil/echoserver"
@@ -59,7 +54,8 @@ func TestClientGatewayIntegration(t *testing.T) {
testClientGatewayContainerExtraHosts,
testClientGatewayContainerSignal,
testWarnings,
- testClientGatewayFrontendAttrs,
+ testClientGatewayNilResult,
+ testClientGatewayEmptyImageExec,
), integration.WithMirroredImages(integration.OfficialImages("busybox:latest")))
integration.Run(t, integration.TestFuncs(
@@ -134,9 +130,7 @@ func testClientGatewaySolve(t *testing.T, sb integration.Sandbox) {
return r, nil
}
- tmpdir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
testStr := "This is a test"
@@ -153,7 +147,7 @@ func testClientGatewaySolve(t *testing.T, sb integration.Sandbox) {
}, product, b, nil)
require.NoError(t, err)
- read, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo"))
+ read, err := os.ReadFile(filepath.Join(tmpdir, "foo"))
require.NoError(t, err)
require.Equal(t, testStr, string(read))
@@ -476,7 +470,7 @@ func testClientGatewayContainerExecPipe(t *testing.T, sb integration.Sandbox) {
Args: []string{"cat"},
Cwd: "/",
Tty: false,
- Stdin: ioutil.NopCloser(stdin2),
+ Stdin: io.NopCloser(stdin2),
Stdout: stdout2,
})
@@ -688,17 +682,14 @@ func testClientGatewayContainerMounts(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
defer c.Close()
- tmpdir, err := ioutil.TempDir("", "buildkit-buildctl")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
- err = ioutil.WriteFile(filepath.Join(tmpdir, "local-file"), []byte("local"), 0644)
+ err = os.WriteFile(filepath.Join(tmpdir, "local-file"), []byte("local"), 0644)
require.NoError(t, err)
a := agent.NewKeyring()
- sockPath, clean, err := makeSSHAgentSock(a)
+ sockPath, err := makeSSHAgentSock(t, a)
require.NoError(t, err)
- defer clean()
ssh, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{
ID: t.Name(),
@@ -1624,6 +1615,7 @@ func testClientGatewayExecFileActionError(t *testing.T, sb integration.Sandbox)
// testClientGatewayContainerSecurityMode ensures that the correct security mode
// is propagated to the gateway container
func testClientGatewayContainerSecurityMode(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureSecurityMode)
requiresLinux(t)
ctx := sb.Context()
@@ -1650,7 +1642,6 @@ func testClientGatewayContainerSecurityMode(t *testing.T, sb integration.Sandbox
}
allowedEntitlements = []entitlements.Entitlement{}
} else {
- skipDockerd(t, sb)
assertCaps = func(caps uint64) {
/*
$ capsh --decode=0000003fffffffff
@@ -1999,62 +1990,86 @@ func testClientGatewayContainerSignal(t *testing.T, sb integration.Sandbox) {
checkAllReleasable(t, c, sb, true)
}
-// moby/buildkit#2476
-func testClientGatewayFrontendAttrs(t *testing.T, sb integration.Sandbox) {
+func testClientGatewayNilResult(t *testing.T, sb integration.Sandbox) {
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- fooattrval := "bar"
- bazattrval := "fuu"
-
b := func(ctx context.Context, c client.Client) (*client.Result, error) {
- st := llb.Image("busybox:latest").Run(
- llb.ReadonlyRootFS(),
- llb.Args([]string{"/bin/sh", "-c", `echo hello`}),
- )
- def, err := st.Marshal(sb.Context())
+ st := llb.Image("busybox:latest")
+ diff := llb.Diff(st, st)
+ def, err := diff.Marshal(sb.Context())
if err != nil {
return nil, err
}
res, err := c.Solve(ctx, client.SolveRequest{
Definition: def.ToPB(),
- FrontendOpt: map[string]string{
- "build-arg:foo": fooattrval,
- },
+ Evaluate: true,
})
require.NoError(t, err)
- require.Contains(t, res.Metadata, exptypes.ExporterBuildInfo)
-
- var bi binfotypes.BuildInfo
- require.NoError(t, json.Unmarshal(res.Metadata[exptypes.ExporterBuildInfo], &bi))
- require.Contains(t, bi.Attrs, "build-arg:foo")
- bi.Attrs["build-arg:baz"] = &bazattrval
- bmbi, err := json.Marshal(bi)
+ ref, err := res.SingleRef()
require.NoError(t, err)
- res.AddMeta(exptypes.ExporterBuildInfo, bmbi)
- return res, err
+ dirEnts, err := ref.ReadDir(ctx, client.ReadDirRequest{
+ Path: "/",
+ })
+ require.NoError(t, err)
+ require.Len(t, dirEnts, 0)
+ return nil, nil
}
- res, err := c.Build(sb.Context(), SolveOpt{}, "", b, nil)
+ _, err = c.Build(sb.Context(), SolveOpt{}, "", b, nil)
require.NoError(t, err)
+}
- require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo)
- decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo])
+func testClientGatewayEmptyImageExec(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
+ defer c.Close()
- var bi binfotypes.BuildInfo
- require.NoError(t, json.Unmarshal(decbi, &bi))
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+ target := registry + "/buildkit/testemptyimage:latest"
- require.Contains(t, bi.Attrs, "build-arg:foo")
- require.Equal(t, &fooattrval, bi.Attrs["build-arg:foo"])
- require.Contains(t, bi.Attrs, "build-arg:baz")
- require.Equal(t, &bazattrval, bi.Attrs["build-arg:baz"])
+ // push an empty image
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, "", func(ctx context.Context, c client.Client) (*client.Result, error) {
+ return client.NewResult(), nil
+ }, nil)
+ require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+ _, err = c.Build(sb.Context(), SolveOpt{}, "", func(ctx context.Context, gw client.Client) (*client.Result, error) {
+ // create an exec on that empty image (expected to fail, but not to panic)
+ st := llb.Image(target).Run(
+ llb.Args([]string{"echo", "hello"}),
+ ).Root()
+ def, err := st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
+ _, err = gw.Solve(ctx, client.SolveRequest{
+ Definition: def.ToPB(),
+ Evaluate: true,
+ })
+ require.ErrorContains(t, err, `process "echo hello" did not complete successfully`)
+ return nil, nil
+ }, nil)
+ require.NoError(t, err)
}
type nopCloser struct {
diff --git a/client/client.go b/client/client.go
index 8c9259a4a9d1..deac2507a996 100644
--- a/client/client.go
+++ b/client/client.go
@@ -4,11 +4,12 @@ import (
"context"
"crypto/tls"
"crypto/x509"
- "io/ioutil"
"net"
"net/url"
+ "os"
"strings"
+ contentapi "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/defaults"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
controlapi "github.com/moby/buildkit/api/services/control"
@@ -168,12 +169,16 @@ func (c *Client) setupDelegatedTracing(ctx context.Context, td TracerDelegate) e
return td.SetSpanExporter(ctx, e)
}
-func (c *Client) controlClient() controlapi.ControlClient {
+func (c *Client) ControlClient() controlapi.ControlClient {
return controlapi.NewControlClient(c.conn)
}
+func (c *Client) ContentClient() contentapi.ContentClient {
+ return contentapi.NewContentClient(c.conn)
+}
+
func (c *Client) Dialer() session.Dialer {
- return grpchijack.Dialer(c.controlClient())
+ return grpchijack.Dialer(c.ControlClient())
}
func (c *Client) Close() error {
@@ -212,7 +217,7 @@ func WithCredentials(serverName, ca, cert, key string) ClientOpt {
}
func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
- ca, err := ioutil.ReadFile(opts.CACert)
+ ca, err := os.ReadFile(opts.CACert)
if err != nil {
return nil, errors.Wrap(err, "could not read ca certificate")
}
@@ -234,7 +239,6 @@ func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
return nil, errors.Wrap(err, "could not read certificate/key")
}
cfg.Certificates = []tls.Certificate{cert}
- cfg.BuildNameToCertificate()
}
return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
diff --git a/client/client_nydus_test.go b/client/client_nydus_test.go
new file mode 100644
index 000000000000..ecaffba6bbfd
--- /dev/null
+++ b/client/client_nydus_test.go
@@ -0,0 +1,139 @@
+//go:build nydus
+// +build nydus
+
+package client
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+
+ "github.com/containerd/containerd/images"
+ "github.com/containerd/containerd/namespaces"
+ nydusify "github.com/containerd/nydus-snapshotter/pkg/converter"
+ "github.com/moby/buildkit/client/llb"
+ "github.com/moby/buildkit/identity"
+ "github.com/moby/buildkit/util/compression"
+ "github.com/moby/buildkit/util/testutil/integration"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNydusIntegration(t *testing.T) {
+ testIntegration(
+ t,
+ testBuildExportNydusWithHybrid,
+ )
+}
+
+func testBuildExportNydusWithHybrid(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" {
+ t.Skip("test requires containerd worker")
+ }
+
+ client, err := newContainerd(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ var (
+ imageService = client.ImageService()
+ contentStore = client.ContentStore()
+ ctx = namespaces.WithNamespace(sb.Context(), "buildkit")
+ )
+
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ buildNydus := func(file string) {
+ orgImage := "docker.io/library/alpine:latest"
+ baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/" + file}))
+ def, err := baseDef.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ target := registry + "/nydus/alpine:" + identity.NewID()
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "compression": "nydus",
+ "oci-mediatypes": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ img, err := imageService.Get(ctx, target)
+ require.NoError(t, err)
+
+ manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
+ require.NoError(t, err)
+
+ require.Equal(t, len(manifest.Layers), 3)
+ require.Equal(t, "true", manifest.Layers[0].Annotations[nydusify.LayerAnnotationNydusBlob])
+ require.Equal(t, "true", manifest.Layers[1].Annotations[nydusify.LayerAnnotationNydusBlob])
+ require.Equal(t, "true", manifest.Layers[2].Annotations[nydusify.LayerAnnotationNydusBootstrap])
+ }
+
+ buildOther := func(file string, compType compression.Type, forceCompression bool) {
+ orgImage := "docker.io/library/alpine:latest"
+ baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/" + file}))
+ def, err := baseDef.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ mediaTypes := map[compression.Type]string{
+ compression.Gzip: ocispecs.MediaTypeImageLayerGzip,
+ compression.Zstd: ocispecs.MediaTypeImageLayer + "+zstd",
+ }
+ target := fmt.Sprintf("%s/%s/alpine:%s", registry, compType, identity.NewID())
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "compression": compType.String(),
+ "oci-mediatypes": "true",
+ "force-compression": strconv.FormatBool(forceCompression),
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ img, err := imageService.Get(ctx, target)
+ require.NoError(t, err)
+
+ manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
+ require.NoError(t, err)
+
+ require.Equal(t, 2, len(manifest.Layers))
+ require.Equal(t, mediaTypes[compType], manifest.Layers[0].MediaType)
+ require.Equal(t, mediaTypes[compType], manifest.Layers[1].MediaType)
+ }
+
+ // Make sure that the nydus compression layer is not mixed with other
+ // types of compression layers in an image.
+ buildNydus("foo")
+ buildOther("foo", compression.Gzip, false)
+ buildOther("foo", compression.Zstd, true)
+
+ buildOther("bar", compression.Gzip, false)
+ buildOther("bar", compression.Zstd, true)
+ buildNydus("bar")
+}
diff --git a/client/client_test.go b/client/client_test.go
index cda4e58c59e4..b97eb75f274b 100644
--- a/client/client_test.go
+++ b/client/client_test.go
@@ -12,10 +12,10 @@ import (
"encoding/pem"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"os"
+ "path"
"path/filepath"
"runtime"
"strconv"
@@ -26,6 +26,7 @@ import (
"github.com/containerd/containerd"
"github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/content/local"
ctderrdefs "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/namespaces"
@@ -33,31 +34,42 @@ import (
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/snapshots"
"github.com/containerd/continuity/fs/fstest"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
+ controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
gateway "github.com/moby/buildkit/frontend/gateway/client"
+ gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/secrets/secretsprovider"
"github.com/moby/buildkit/session/sshforward/sshprovider"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb"
+ "github.com/moby/buildkit/solver/result"
+ "github.com/moby/buildkit/sourcepolicy"
+ sourcepolicypb "github.com/moby/buildkit/sourcepolicy/pb"
+ spb "github.com/moby/buildkit/sourcepolicy/pb"
+ "github.com/moby/buildkit/util/attestation"
binfotypes "github.com/moby/buildkit/util/buildinfo/types"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/entitlements"
+ "github.com/moby/buildkit/util/purl"
"github.com/moby/buildkit/util/testutil"
"github.com/moby/buildkit/util/testutil/echoserver"
"github.com/moby/buildkit/util/testutil/httpserver"
"github.com/moby/buildkit/util/testutil/integration"
+ digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
+ spdx "github.com/spdx/tools-golang/spdx/v2_3"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/sync/errgroup"
)
func init() {
- if os.Getenv("TEST_DOCKERD") == "1" {
+ if integration.IsTestDockerd() {
integration.InitDockerdWorker()
} else {
integration.InitOCIWorker()
@@ -72,26 +84,25 @@ type nopWriteCloser struct {
func (nopWriteCloser) Close() error { return nil }
func TestIntegration(t *testing.T) {
- mirroredImages := integration.OfficialImages("busybox:latest", "alpine:latest")
- mirroredImages["tonistiigi/test:nolayers"] = "docker.io/tonistiigi/test:nolayers"
- mirroredImages["cpuguy83/buildkit-foreign:latest"] = "docker.io/cpuguy83/buildkit-foreign:latest"
- mirrors := integration.WithMirroredImages(mirroredImages)
-
- tests := integration.TestFuncs(
+ testIntegration(
+ t,
testCacheExportCacheKeyLoop,
testRelativeWorkDir,
testFileOpMkdirMkfile,
testFileOpCopyRm,
testFileOpCopyIncludeExclude,
testFileOpRmWildcard,
+ testFileOpCopyUIDCache,
testCallDiskUsage,
testBuildMultiMount,
testBuildHTTPSource,
testBuildPushAndValidate,
testBuildExportWithUncompressed,
+ testBuildExportScratch,
testResolveAndHosts,
testUser,
testOCIExporter,
+ testOCIExporterContentStore,
testWhiteoutParentDir,
testFrontendImageNaming,
testDuplicateWhiteouts,
@@ -143,6 +154,8 @@ func TestIntegration(t *testing.T) {
testFileOpInputSwap,
testRelativeMountpoint,
testLocalSourceDiffer,
+ testOCILayoutSource,
+ testOCILayoutPlatformSource,
testBuildExportZstd,
testPullZstdImage,
testMergeOp,
@@ -156,12 +169,42 @@ func TestIntegration(t *testing.T) {
testBuildInfoInline,
testBuildInfoNoExport,
testZstdLocalCacheExport,
+ testCacheExportIgnoreError,
testZstdRegistryCacheImportExport,
testZstdLocalCacheImportExport,
testUncompressedLocalCacheImportExport,
testUncompressedRegistryCacheImportExport,
testStargzLazyRegistryCacheImportExport,
+ testValidateDigestOrigin,
+ testCallInfo,
+ testPullWithLayerLimit,
+ testExportAnnotations,
+ testExportAnnotationsMediaTypes,
+ testExportAttestations,
+ testAttestationDefaultSubject,
+ testSourceDateEpochLayerTimestamps,
+ testSourceDateEpochClamp,
+ testSourceDateEpochReset,
+ testSourceDateEpochLocalExporter,
+ testSourceDateEpochTarExporter,
+ testAttestationBundle,
+ testSBOMScan,
+ testSBOMScanSingleRef,
+ testSBOMSupplements,
+ testMultipleCacheExports,
+ testMountStubsDirectory,
+ testMountStubsTimestamp,
+ testSourcePolicy,
)
+}
+
+func testIntegration(t *testing.T, funcs ...func(t *testing.T, sb integration.Sandbox)) {
+ mirroredImages := integration.OfficialImages("busybox:latest", "alpine:latest")
+ mirroredImages["tonistiigi/test:nolayers"] = "docker.io/tonistiigi/test:nolayers"
+ mirroredImages["cpuguy83/buildkit-foreign:latest"] = "docker.io/cpuguy83/buildkit-foreign:latest"
+ mirrors := integration.WithMirroredImages(mirroredImages)
+
+ tests := integration.TestFuncs(funcs...)
tests = append(tests, diffOpTestCases()...)
integration.Run(t, tests, mirrors)
@@ -186,6 +229,15 @@ func TestIntegration(t *testing.T) {
"host": hostNetwork,
}),
)
+
+ integration.Run(t, integration.TestFuncs(
+ testBridgeNetworkingDNSNoRootless,
+ ),
+ mirrors,
+ integration.WithMatrix("netmode", map[string]interface{}{
+ "dns": bridgeDNSNetwork,
+ }),
+ )
}
func newContainerd(cdAddress string) (*containerd.Client, error) {
@@ -194,15 +246,14 @@ func newContainerd(cdAddress string) (*containerd.Client, error) {
// moby/buildkit#1336
func testCacheExportCacheKeyLoop(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- tmpdir, err := ioutil.TempDir("", "buildkit-buildctl")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
- err = ioutil.WriteFile(filepath.Join(tmpdir, "foo"), []byte("foodata"), 0600)
+ err = os.WriteFile(filepath.Join(tmpdir, "foo"), []byte("foodata"), 0600)
require.NoError(t, err)
for _, mode := range []bool{false, true} {
@@ -258,6 +309,46 @@ func testBridgeNetworking(t *testing.T, sb integration.Sandbox) {
_, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
require.Error(t, err)
}
+
+func testBridgeNetworkingDNSNoRootless(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCNINetwork)
+ if os.Getenv("BUILDKIT_RUN_NETWORK_INTEGRATION_TESTS") == "" {
+ t.SkipNow()
+ }
+
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ name := identity.NewID()
+ server, err := llb.Image("busybox").
+ Run(
+ llb.Shlexf(`sh -c 'test "$(nc -l -p 1234)" = "foo"'`),
+ llb.Hostname(name),
+ ).
+ Marshal(sb.Context())
+ require.NoError(t, err)
+
+ client, err := llb.Image("busybox").
+ Run(
+ llb.Shlexf("sh -c 'until echo foo | nc " + name + " 1234 -w0; do sleep 0.1; done'"),
+ ).
+ Marshal(sb.Context())
+ require.NoError(t, err)
+
+ eg, ctx := errgroup.WithContext(context.Background())
+ eg.Go(func() error {
+ _, err := c.Solve(ctx, server, SolveOpt{}, nil)
+ return err
+ })
+ eg.Go(func() error {
+ _, err := c.Solve(ctx, client, SolveOpt{}, nil)
+ return err
+ })
+ err = eg.Wait()
+ require.NoError(t, err)
+}
+
func testHostNetworking(t *testing.T, sb integration.Sandbox) {
if os.Getenv("BUILDKIT_RUN_NETWORK_INTEGRATION_TESTS") == "" {
t.SkipNow()
@@ -297,9 +388,7 @@ func testExportBusyboxLocal(t *testing.T, sb integration.Sandbox) {
def, err := llb.Image("busybox").Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -390,9 +479,8 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) {
err = a.Add(agent.AddedKey{PrivateKey: k})
require.NoError(t, err)
- sockPath, clean, err := makeSSHAgentSock(a)
+ sockPath, err := makeSSHAgentSock(t, a)
require.NoError(t, err)
- defer clean()
ssh, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{
Paths: []string{sockPath},
@@ -439,9 +527,7 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) {
def, err = out.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -454,11 +540,11 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "sock"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "sock"))
require.NoError(t, err)
require.Equal(t, "/run/buildkit/ssh_agent.0", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Contains(t, string(dt), "2048")
require.Contains(t, string(dt), "(RSA)")
@@ -486,7 +572,7 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Contains(t, string(dt), "agent refused operation")
@@ -500,7 +586,7 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) {
def, err = out.Marshal(sb.Context())
require.NoError(t, err)
- k, err = rsa.GenerateKey(rand.Reader, 1024)
+ k, err = rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err)
dt = pem.EncodeToMemory(
@@ -510,11 +596,9 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) {
},
)
- tmpDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ tmpDir := t.TempDir()
- err = ioutil.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600)
+ err = os.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600)
require.NoError(t, err)
ssh, err = sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{
@@ -522,9 +606,7 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) {
}})
require.NoError(t, err)
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -537,9 +619,9 @@ func testSSHMount(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
- require.Contains(t, string(dt), "1024")
+ require.Contains(t, string(dt), "2048")
require.Contains(t, string(dt), "(RSA)")
}
@@ -572,9 +654,7 @@ func testShmSize(t *testing.T, sb integration.Sandbox) {
def, err := out.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -586,7 +666,7 @@ func testShmSize(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Contains(t, string(dt), `size=131072k`)
}
@@ -609,9 +689,7 @@ func testUlimit(t *testing.T, sb integration.Sandbox) {
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -623,11 +701,11 @@ func testUlimit(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "first"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "first"))
require.NoError(t, err)
require.Equal(t, `1062`, strings.TrimSpace(string(dt)))
- dt2, err := ioutil.ReadFile(filepath.Join(destDir, "second"))
+ dt2, err := os.ReadFile(filepath.Join(destDir, "second"))
require.NoError(t, err)
require.NotEqual(t, `1062`, strings.TrimSpace(string(dt2)))
}
@@ -654,9 +732,7 @@ func testCgroupParent(t *testing.T, sb integration.Sandbox) {
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -668,11 +744,11 @@ func testCgroupParent(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "first"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "first"))
require.NoError(t, err)
require.Contains(t, strings.TrimSpace(string(dt)), `/foocgroup/buildkit/`)
- dt2, err := ioutil.ReadFile(filepath.Join(destDir, "second"))
+ dt2, err := os.ReadFile(filepath.Join(destDir, "second"))
require.NoError(t, err)
require.NotContains(t, strings.TrimSpace(string(dt2)), `/foocgroup/buildkit/`)
}
@@ -706,7 +782,7 @@ func testNetworkMode(t *testing.T, sb integration.Sandbox) {
}
func testPushByDigest(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -751,6 +827,7 @@ func testPushByDigest(t *testing.T, sb integration.Sandbox) {
}
func testSecurityMode(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureSecurityMode)
command := `sh -c 'cat /proc/self/status | grep CapEff | cut -f 2 > /out'`
mode := llb.SecurityModeSandbox
var allowedEntitlements []entitlements.Entitlement
@@ -767,7 +844,6 @@ func testSecurityMode(t *testing.T, sb integration.Sandbox) {
}
allowedEntitlements = []entitlements.Entitlement{}
} else {
- skipDockerd(t, sb)
assertCaps = func(caps uint64) {
/*
$ capsh --decode=0000003fffffffff
@@ -796,9 +872,7 @@ func testSecurityMode(t *testing.T, sb integration.Sandbox) {
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -812,7 +886,7 @@ func testSecurityMode(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
- contents, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ contents, err := os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
caps, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 16, 64)
@@ -824,6 +898,7 @@ func testSecurityMode(t *testing.T, sb integration.Sandbox) {
}
func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureSecurityMode)
if sb.Rootless() {
t.SkipNow()
}
@@ -834,7 +909,6 @@ func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) {
if secMode == securitySandbox {
allowedEntitlements = []entitlements.Entitlement{}
} else {
- skipDockerd(t, sb)
mode = llb.SecurityModeInsecure
allowedEntitlements = []entitlements.Entitlement{entitlements.EntitlementSecurityInsecure}
}
@@ -843,7 +917,12 @@ func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
defer c.Close()
- command := `mkdir /sys/fs/cgroup/cpuset/securitytest`
+ cg := "/sys/fs/cgroup/cpuset/securitytest" // cgroup v1
+ if _, err := os.Stat("/sys/fs/cgroup/cpuset"); errors.Is(err, os.ErrNotExist) {
+ cg = "/sys/fs/cgroup/securitytest" // cgroup v2
+ }
+
+ command := "mkdir " + cg
st := llb.Image("busybox:latest").
Run(llb.Shlex(command),
llb.Security(mode))
@@ -858,7 +937,7 @@ func testSecurityModeSysfs(t *testing.T, sb integration.Sandbox) {
if secMode == securitySandbox {
require.Error(t, err)
require.Contains(t, err.Error(), "did not complete successfully")
- require.Contains(t, err.Error(), "mkdir /sys/fs/cgroup/cpuset/securitytest")
+ require.Contains(t, err.Error(), "mkdir "+cg)
} else {
require.NoError(t, err)
}
@@ -896,7 +975,7 @@ func testSecurityModeErrors(t *testing.T, sb integration.Sandbox) {
}
func testFrontendImageNaming(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureDirectPush)
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -911,13 +990,12 @@ func testFrontendImageNaming(t *testing.T, sb integration.Sandbox) {
checkImageName := map[string]func(out, imageName string, exporterResponse map[string]string){
ExporterOCI: func(out, imageName string, exporterResponse map[string]string) {
// Nothing to check
- return
},
ExporterDocker: func(out, imageName string, exporterResponse map[string]string) {
require.Contains(t, exporterResponse, "image.name")
require.Equal(t, exporterResponse["image.name"], "docker.io/library/"+imageName)
- dt, err := ioutil.ReadFile(out)
+ dt, err := os.ReadFile(out)
require.NoError(t, err)
m, err := testutil.ReadTarToMap(dt, false)
@@ -989,9 +1067,7 @@ func testFrontendImageNaming(t *testing.T, sb integration.Sandbox) {
for _, exp := range []string{ExporterOCI, ExporterDocker, ExporterImage} {
exp := exp // capture loop variable.
t.Run(exp, func(t *testing.T) {
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
so := SolveOpt{
Exports: []ExportEntry{
@@ -1063,9 +1139,9 @@ func testSecretMounts(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- // test optional
+ // test optional, mount should not exist when secret not present in SolveOpt
st = llb.Image("busybox:latest").
- Run(llb.Shlex(`echo secret2`), llb.AddSecret("/run/secrets/mysecret2", llb.SecretOptional))
+ Run(llb.Shlex(`test ! -f /run/secrets/mysecret2`), llb.AddSecret("/run/secrets/mysecret2", llb.SecretOptional))
def, err = st.Marshal(sb.Context())
require.NoError(t, err)
@@ -1102,6 +1178,20 @@ func testSecretMounts(t *testing.T, sb integration.Sandbox) {
})},
}, nil)
require.NoError(t, err)
+
+ // test empty secret still creates secret file
+ st = llb.Image("busybox:latest").
+ Run(llb.Shlex(`test -f /run/secrets/mysecret5`), llb.AddSecret("/run/secrets/mysecret5", llb.SecretID("mysecret")))
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Session: []session.Attachable{secretsprovider.FromMap(map[string][]byte{
+ "mysecret": []byte(""),
+ })},
+ }, nil)
+ require.NoError(t, err)
}
func testSecretEnv(t *testing.T, sb integration.Sandbox) {
@@ -1202,7 +1292,8 @@ func testLocalSymlinkEscape(t *testing.T, sb integration.Sandbox) {
[[ $(readlink /mount/sub/bar) == "../../../etc/group" ]]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
// point to absolute path that is not part of dir
fstest.Symlink("/etc/passwd", "foo"),
fstest.CreateDir("sub", 0700),
@@ -1220,7 +1311,6 @@ func testLocalSymlinkEscape(t *testing.T, sb integration.Sandbox) {
fstest.CreateFile("test.sh", test, 0700),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
local := llb.Local("mylocal", llb.FollowPaths([]string{
"test.sh", "foo", "sub/bar", "bax", "sub/sub2/file",
@@ -1255,9 +1345,7 @@ func testRelativeWorkDir(t *testing.T, sb integration.Sandbox) {
def, err := pwd.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1269,7 +1357,7 @@ func testRelativeWorkDir(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "pwd"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "pwd"))
require.NoError(t, err)
require.Equal(t, []byte("/test1/test2\n"), dt)
}
@@ -1286,9 +1374,7 @@ func testFileOpMkdirMkfile(t *testing.T, sb integration.Sandbox) {
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1304,7 +1390,7 @@ func testFileOpMkdirMkfile(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
require.Equal(t, true, fi.IsDir())
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "bar"))
require.NoError(t, err)
require.Equal(t, []byte("contents"), dt)
}
@@ -1315,20 +1401,20 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
defer c.Close()
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("myfile", []byte("data0"), 0600),
fstest.CreateDir("sub", 0700),
fstest.CreateFile("sub/foo", []byte("foo0"), 0600),
fstest.CreateFile("sub/bar", []byte("bar0"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- dir2, err := tmpdir(
+ dir2, err := integration.Tmpdir(
+ t,
fstest.CreateFile("file2", []byte("file2"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
st := llb.Scratch().
File(
@@ -1340,9 +1426,7 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) {
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1358,7 +1442,7 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "myfile2"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "myfile2"))
require.NoError(t, err)
require.Equal(t, []byte("data0"), dt)
@@ -1366,32 +1450,91 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
require.Equal(t, true, fi.IsDir())
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "out/bar"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "out/bar"))
require.NoError(t, err)
require.Equal(t, []byte("bar0"), dt)
_, err = os.Stat(filepath.Join(destDir, "out/foo"))
require.ErrorIs(t, err, os.ErrNotExist)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "file2"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "file2"))
require.NoError(t, err)
require.Equal(t, []byte("file2"), dt)
}
+// moby/buildkit#3291
+func testFileOpCopyUIDCache(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ st := llb.Scratch().File(
+ llb.Copy(llb.Image("alpine").Run(llb.Shlex(`sh -c 'echo 123 > /foo && chown 1000:1000 /foo'`)).Root(), "foo", "foo"))
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ var buf bytes.Buffer
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterTar,
+ Output: fixedWriteCloser(&nopWriteCloser{&buf}),
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ m, err := testutil.ReadTarToMap(buf.Bytes(), false)
+ require.NoError(t, err)
+
+ fi, ok := m["foo"]
+ require.True(t, ok)
+ require.Equal(t, 1000, fi.Header.Uid)
+ require.Equal(t, 1000, fi.Header.Gid)
+
+ // repeat to check cache does not apply for different uid
+ st = llb.Scratch().File(
+ llb.Copy(llb.Image("alpine").Run(llb.Shlex(`sh -c 'echo 123 > /foo'`)).Root(), "foo", "foo"))
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ buf = bytes.Buffer{}
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterTar,
+ Output: fixedWriteCloser(&nopWriteCloser{&buf}),
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ m, err = testutil.ReadTarToMap(buf.Bytes(), false)
+ require.NoError(t, err)
+
+ fi, ok = m["foo"]
+ require.True(t, ok)
+ require.Equal(t, 0, fi.Header.Uid)
+ require.Equal(t, 0, fi.Header.Gid)
+}
+
func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) {
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("myfile", []byte("data0"), 0600),
fstest.CreateDir("sub", 0700),
fstest.CreateFile("sub/foo", []byte("foo0"), 0600),
fstest.CreateFile("sub/bar", []byte("bar0"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
st := llb.Scratch().File(
llb.Copy(
@@ -1411,9 +1554,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) {
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1428,7 +1569,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "sub", "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "sub", "foo"))
require.NoError(t, err)
require.Equal(t, []byte("foo0"), dt)
@@ -1437,7 +1578,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) {
require.ErrorIs(t, err, os.ErrNotExist)
}
- randBytes, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ randBytes, err := os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
// Create additional file which doesn't match the include pattern, and make
@@ -1460,9 +1601,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) {
def, err = st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1477,7 +1616,7 @@ func testFileOpCopyIncludeExclude(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- randBytes2, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ randBytes2, err := os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
require.Equal(t, randBytes, randBytes2)
@@ -1527,11 +1666,11 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT
require.NoError(t, err)
defer c.Close()
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("foo", []byte("foo"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
tv := syscall.NsecToTimespec(time.Now().UnixNano())
@@ -1543,9 +1682,7 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT
def, err := st.Marshal(context.TODO())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(context.TODO(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1560,11 +1697,11 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, []byte("foo"), dt)
- err = ioutil.WriteFile(filepath.Join(dir, "foo"), []byte("bar"), 0600)
+ err = os.WriteFile(filepath.Join(dir, "foo"), []byte("bar"), 0600)
require.NoError(t, err)
err = syscall.UtimesNano(filepath.Join(dir, "foo"), []syscall.Timespec{tv, tv})
@@ -1583,7 +1720,7 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT
}, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
if d == llb.DiffMetadata {
require.Equal(t, []byte("foo"), dt)
@@ -1593,135 +1730,443 @@ func testLocalSourceWithDiffer(t *testing.T, sb integration.Sandbox, d llb.DiffT
}
}
-func testFileOpRmWildcard(t *testing.T, sb integration.Sandbox) {
+func testOCILayoutSource(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureOCILayout)
requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ c, err := New(context.TODO(), sb.Address())
require.NoError(t, err)
defer c.Close()
- dir, err := tmpdir(
- fstest.CreateDir("foo", 0700),
- fstest.CreateDir("bar", 0700),
- fstest.CreateFile("foo/target", []byte("foo0"), 0600),
- fstest.CreateFile("bar/target", []byte("bar0"), 0600),
- fstest.CreateFile("bar/remaining", []byte("bar1"), 0600),
- )
- require.NoError(t, err)
- defer os.RemoveAll(dir)
+ // create a tempdir where we will store the OCI layout
+ dir := t.TempDir()
- st := llb.Scratch().File(
- llb.Copy(llb.Local("mylocal"), "foo", "foo").
- Copy(llb.Local("mylocal"), "bar", "bar"),
- ).File(
- llb.Rm("*/target", llb.WithAllowWildcard(true)),
- )
- def, err := st.Marshal(sb.Context())
- require.NoError(t, err)
+ // make an image that is exported there
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
+
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+ }
+
+ run(`sh -c "echo -n first > foo"`)
+ run(`sh -c "echo -n second > bar"`)
- destDir, err := ioutil.TempDir("", "buildkit")
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ outW := bytes.NewBuffer(nil)
+ attrs := map[string]string{}
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDir,
+ Type: ExporterOCI,
+ Attrs: attrs,
+ Output: fixedWriteCloser(nopWriteCloser{outW}),
},
},
- LocalDirs: map[string]string{
- "mylocal": dir,
- },
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar/remaining"))
- require.NoError(t, err)
- require.Equal(t, []byte("bar1"), dt)
-
- fi, err := os.Stat(filepath.Join(destDir, "foo"))
+ // extract the tar stream to the directory as OCI layout
+ m, err := testutil.ReadTarToMap(outW.Bytes(), false)
require.NoError(t, err)
- require.Equal(t, true, fi.IsDir())
- _, err = os.Stat(filepath.Join(destDir, "foo/target"))
- require.ErrorIs(t, err, os.ErrNotExist)
-
- _, err = os.Stat(filepath.Join(destDir, "bar/target"))
- require.ErrorIs(t, err, os.ErrNotExist)
-}
+ for filename, content := range m {
+ fullFilename := path.Join(dir, filename)
+ err = os.MkdirAll(path.Dir(fullFilename), 0755)
+ require.NoError(t, err)
+ if content.Header.FileInfo().IsDir() {
+ err = os.MkdirAll(fullFilename, 0755)
+ require.NoError(t, err)
+ } else {
+ err = os.WriteFile(fullFilename, content.Data, 0644)
+ require.NoError(t, err)
+ }
+ }
-func testCallDiskUsage(t *testing.T, sb integration.Sandbox) {
- c, err := New(sb.Context(), sb.Address())
+ var index ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &index)
require.NoError(t, err)
- defer c.Close()
- _, err = c.DiskUsage(sb.Context())
+ require.Equal(t, 1, len(index.Manifests))
+ digest := index.Manifests[0].Digest
+
+ store, err := local.NewStore(dir)
require.NoError(t, err)
-}
-func testBuildMultiMount(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ // reference the OCI Layout in a build
+ // note that the key does not need to be the directory name, just something
+ // unique. since we are doing just one build with one remote here, we can
+ // give it any ID
+ csID := "my-content-store"
+ st = llb.OCILayout(fmt.Sprintf("not/real@%s", digest), llb.OCIStore("", csID))
+
+ def, err = st.Marshal(context.TODO())
require.NoError(t, err)
- defer c.Close()
- alpine := llb.Image("docker.io/library/alpine:latest")
- ls := alpine.Run(llb.Shlex("/bin/ls -l"))
- busybox := llb.Image("docker.io/library/busybox:latest")
- cp := ls.Run(llb.Shlex("/bin/cp -a /busybox/etc/passwd baz"))
- cp.AddMount("/busybox", busybox)
+ destDir := t.TempDir()
- def, err := cp.Marshal(sb.Context())
+ _, err = c.Solve(context.TODO(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ OCIStores: map[string]content.Store{
+ csID: store,
+ },
+ }, nil)
require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
+ require.Equal(t, []byte("first"), dt)
- checkAllReleasable(t, c, sb, true)
+ dt, err = os.ReadFile(filepath.Join(destDir, "bar"))
+ require.NoError(t, err)
+ require.Equal(t, []byte("second"), dt)
}
-func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) {
- c, err := New(sb.Context(), sb.Address())
+func testOCILayoutPlatformSource(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureOCILayout)
+ requiresLinux(t)
+ c, err := New(context.TODO(), sb.Address())
require.NoError(t, err)
defer c.Close()
- modTime := time.Now().Add(-24 * time.Hour) // avoid falso positive with current time
-
- resp := httpserver.Response{
- Etag: identity.NewID(),
- Content: []byte("content1"),
- LastModified: &modTime,
- }
+ // create a tempdir where we will store the OCI layout
+ dir := t.TempDir()
- server := httpserver.NewTestServer(map[string]httpserver.Response{
- "/foo": resp,
- })
- defer server.Close()
+ platformsToTest := []string{"linux/amd64", "linux/arm64"}
- // invalid URL first
- st := llb.HTTP(server.URL + "/bar")
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+ expPlatforms := &exptypes.Platforms{
+ Platforms: make([]exptypes.Platform, len(platformsToTest)),
+ }
+ for i, platform := range platformsToTest {
+ st := llb.Scratch().File(
+ llb.Mkfile("platform", 0600, []byte(platform)),
+ )
+
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
- def, err := st.Marshal(sb.Context())
- require.NoError(t, err)
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid response status 404")
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
- // first correct request
- st = llb.HTTP(server.URL + "/foo")
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(platform, ref)
- def, err = st.Marshal(sb.Context())
- require.NoError(t, err)
+ expPlatforms.Platforms[i] = exptypes.Platform{
+ ID: platform,
+ Platform: platforms.MustParse(platform),
+ }
+ }
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
+ return res, nil
+ }
+ attrs := map[string]string{}
+ outW := bytes.NewBuffer(nil)
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterOCI,
+ Attrs: attrs,
+ Output: fixedWriteCloser(nopWriteCloser{outW}),
+ },
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ // extract the tar stream to the directory as OCI layout
+ m, err := testutil.ReadTarToMap(outW.Bytes(), false)
+ require.NoError(t, err)
+
+ for filename, tarItem := range m {
+ fullFilename := path.Join(dir, filename)
+ err = os.MkdirAll(path.Dir(fullFilename), 0755)
+ require.NoError(t, err)
+ if tarItem.Header.FileInfo().IsDir() {
+ err = os.MkdirAll(fullFilename, 0755)
+ require.NoError(t, err)
+ } else {
+ err = os.WriteFile(fullFilename, tarItem.Data, 0644)
+ require.NoError(t, err)
+ }
+ }
+
+ var index ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &index)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(index.Manifests))
+ digest := index.Manifests[0].Digest
+
+ store, err := local.NewStore(dir)
+ require.NoError(t, err)
+ csID := "my-content-store"
+
+ destDir := t.TempDir()
+
+ frontendOCILayout := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+ expPlatforms := &exptypes.Platforms{
+ Platforms: make([]exptypes.Platform, len(platformsToTest)),
+ }
+ for i, platform := range platformsToTest {
+ st := llb.OCILayout(fmt.Sprintf("not/real@%s", digest), llb.OCIStore("", csID))
+
+ def, err := st.Marshal(ctx, llb.Platform(platforms.MustParse(platform)))
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(platform, ref)
+
+ expPlatforms.Platforms[i] = exptypes.Platform{
+ ID: platform,
+ Platform: platforms.MustParse(platform),
+ }
+ }
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ return res, nil
+ }
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ OCIStores: map[string]content.Store{
+ csID: store,
+ },
+ }, "", frontendOCILayout, nil)
+ require.NoError(t, err)
+
+ for _, platform := range platformsToTest {
+ dt, err := os.ReadFile(filepath.Join(destDir, strings.ReplaceAll(platform, "/", "_"), "platform"))
+ require.NoError(t, err)
+ require.Equal(t, []byte(platform), dt)
+ }
+}
+
+func testFileOpRmWildcard(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateDir("foo", 0700),
+ fstest.CreateDir("bar", 0700),
+ fstest.CreateFile("foo/target", []byte("foo0"), 0600),
+ fstest.CreateFile("bar/target", []byte("bar0"), 0600),
+ fstest.CreateFile("bar/remaining", []byte("bar1"), 0600),
+ )
+ require.NoError(t, err)
+
+ st := llb.Scratch().File(
+ llb.Copy(llb.Local("mylocal"), "foo", "foo").
+ Copy(llb.Local("mylocal"), "bar", "bar"),
+ ).File(
+ llb.Rm("*/target", llb.WithAllowWildcard(true)),
+ )
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ LocalDirs: map[string]string{
+ "mylocal": dir,
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "bar/remaining"))
+ require.NoError(t, err)
+ require.Equal(t, []byte("bar1"), dt)
+
+ fi, err := os.Stat(filepath.Join(destDir, "foo"))
+ require.NoError(t, err)
+ require.Equal(t, true, fi.IsDir())
+
+ _, err = os.Stat(filepath.Join(destDir, "foo/target"))
+ require.ErrorIs(t, err, os.ErrNotExist)
+
+ _, err = os.Stat(filepath.Join(destDir, "bar/target"))
+ require.ErrorIs(t, err, os.ErrNotExist)
+}
+
+func testCallDiskUsage(t *testing.T, sb integration.Sandbox) {
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+ _, err = c.DiskUsage(sb.Context())
+ require.NoError(t, err)
+}
+
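+// testBuildMultiMount checks that an exec with an additional AddMount can read files from the mounted image.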
+func testBuildMultiMount(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ alpine := llb.Image("docker.io/library/alpine:latest")
+ ls := alpine.Run(llb.Shlex("/bin/ls -l"))
+ busybox := llb.Image("docker.io/library/busybox:latest")
+ cp := ls.Run(llb.Shlex("/bin/cp -a /busybox/etc/passwd baz"))
+ cp.AddMount("/busybox", busybox)
+
+ def, err := cp.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
+
+ checkAllReleasable(t, c, sb, true)
+}
+
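+// testBuildExportScratch checks that pushing an empty scratch image produces a manifest with zero layers.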
+func testBuildExportScratch(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ st := llb.Scratch()
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ target := registry + "/buildkit/build/exporter:withnocompressed"
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "compression": "uncompressed",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+ cdAddress := sb.ContainerdAddress()
+ var client *containerd.Client
+ if cdAddress != "" {
+ client, err = newContainerd(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+
+ img, err := client.GetImage(ctx, target)
+ require.NoError(t, err)
+ mfst, err := images.Manifest(ctx, client.ContentStore(), img.Target(), nil)
+ require.NoError(t, err)
+ require.Equal(t, 0, len(mfst.Layers))
+ err = client.ImageService().Delete(ctx, target, images.SynchronousDelete())
+ require.NoError(t, err)
+ }
+}
+
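+// testBuildHTTPSource checks the llb.HTTP source: a missing URL fails the solve, and repeated solves of the same URL are served from the local cache.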
+func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) {
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ modTime := time.Now().Add(-24 * time.Hour) // avoid false positive with current time
+
+ resp := httpserver.Response{
+ Etag: identity.NewID(),
+ Content: []byte("content1"),
+ LastModified: &modTime,
+ }
+
+ server := httpserver.NewTestServer(map[string]httpserver.Response{
+ "/foo": resp,
+ })
+ defer server.Close()
+
+ // invalid URL first
+ st := llb.HTTP(server.URL + "/bar")
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "invalid response status 404")
+
+ // first correct request
+ st = llb.HTTP(server.URL + "/foo")
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
require.Equal(t, server.Stats("/foo").AllRequests, 1)
require.Equal(t, server.Stats("/foo").CachedRequests, 0)
- tmpdir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1736,7 +2181,7 @@ func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) {
require.Equal(t, server.Stats("/foo").AllRequests, 2)
require.Equal(t, server.Stats("/foo").CachedRequests, 1)
- dt, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(tmpdir, "foo"))
require.NoError(t, err)
require.Equal(t, []byte("content1"), dt)
@@ -1759,7 +2204,7 @@ func testBuildHTTPSource(t *testing.T, sb integration.Sandbox) {
require.Equal(t, server.Stats("/foo").AllRequests, 3)
require.Equal(t, server.Stats("/foo").CachedRequests, 1)
- dt, err = ioutil.ReadFile(filepath.Join(tmpdir, "bar"))
+ dt, err = os.ReadFile(filepath.Join(tmpdir, "bar"))
require.NoError(t, err)
require.Equal(t, []byte("content1"), dt)
@@ -1792,9 +2237,7 @@ func testResolveAndHosts(t *testing.T, sb integration.Sandbox) {
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1806,11 +2249,11 @@ func testResolveAndHosts(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "resolv.conf"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "resolv.conf"))
require.NoError(t, err)
require.Contains(t, string(dt), "nameserver")
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "hosts"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "hosts"))
require.NoError(t, err)
require.Contains(t, string(dt), "127.0.0.1 localhost")
}
@@ -1846,9 +2289,7 @@ func testUser(t *testing.T, sb integration.Sandbox) {
def, err := out.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
@@ -1860,32 +2301,32 @@ func testUser(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "user"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "user"))
require.NoError(t, err)
require.Equal(t, "daemon", strings.TrimSpace(string(dt)))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "group"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "group"))
require.NoError(t, err)
require.Equal(t, "daemon", strings.TrimSpace(string(dt)))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "nobody"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "nobody"))
require.NoError(t, err)
require.Equal(t, "nobody", strings.TrimSpace(string(dt)))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "userone"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "userone"))
require.NoError(t, err)
require.Equal(t, "1", strings.TrimSpace(string(dt)))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "root_supplementary"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "root_supplementary"))
require.NoError(t, err)
require.True(t, strings.HasPrefix(string(dt), "root "))
require.True(t, strings.Contains(string(dt), "wheel"))
- dt2, err := ioutil.ReadFile(filepath.Join(destDir, "default_supplementary"))
+ dt2, err := os.ReadFile(filepath.Join(destDir, "default_supplementary"))
require.NoError(t, err)
require.Equal(t, string(dt), string(dt2))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "default_uid"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "default_uid"))
require.NoError(t, err)
require.Equal(t, "0", strings.TrimSpace(string(dt)))
@@ -1893,7 +2334,7 @@ func testUser(t *testing.T, sb integration.Sandbox) {
}
func testOCIExporter(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -1913,9 +2354,7 @@ func testOCIExporter(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
for _, exp := range []string{ExporterOCI, ExporterDocker} {
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
out := filepath.Join(destDir, "out.tar")
outW, err := os.Create(out)
@@ -1936,7 +2375,7 @@ func testOCIExporter(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(out)
+ dt, err := os.ReadFile(out)
require.NoError(t, err)
m, err := testutil.ReadTarToMap(dt, false)
@@ -1995,199 +2434,270 @@ func testOCIExporter(t *testing.T, sb integration.Sandbox) {
checkAllReleasable(t, c, sb, true)
}
-func testFrontendMetadataReturn(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
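+// testOCIExporterContentStore exports the same build both as a tarball and as an OCI layout directory ("tar": "false") and verifies that the directory contents match the tar entries.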
+func testOCIExporterContentStore(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
- res := gateway.NewResult()
- res.AddMeta("frontend.returned", []byte("true"))
- res.AddMeta("not-frontend.not-returned", []byte("false"))
- res.AddMeta("frontendnot.returned.either", []byte("false"))
- return res, nil
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
+
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
}
- res, err := c.Build(sb.Context(), SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterOCI,
- Attrs: map[string]string{},
- Output: fixedWriteCloser(nopWriteCloser{ioutil.Discard}),
- },
- },
- }, "", frontend, nil)
+ run(`sh -c "echo -n first > foo"`)
+ run(`sh -c "echo -n second > bar"`)
+
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- require.Contains(t, res.ExporterResponse, "frontend.returned")
- require.Equal(t, res.ExporterResponse["frontend.returned"], "true")
- require.NotContains(t, res.ExporterResponse, "not-frontend.not-returned")
- require.NotContains(t, res.ExporterResponse, "frontendnot.returned.either")
- checkAllReleasable(t, c, sb, true)
-}
-
-func testFrontendUseSolveResults(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
- require.NoError(t, err)
- defer c.Close()
-
- frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
- st := llb.Scratch().File(
- llb.Mkfile("foo", 0600, []byte("data")),
- )
- def, err := st.Marshal(sb.Context())
- if err != nil {
- return nil, err
- }
+ for _, exp := range []string{ExporterOCI, ExporterDocker} {
+ destDir := t.TempDir()
+ target := "example.com/buildkit/testoci:latest"
- res, err := c.Solve(ctx, gateway.SolveRequest{
- Definition: def.ToPB(),
- })
- if err != nil {
- return nil, err
+ outTar := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(outTar)
+ require.NoError(t, err)
+ attrs := map[string]string{}
+ if exp == ExporterDocker {
+ attrs["name"] = target
}
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: exp,
+ Attrs: attrs,
+ Output: fixedWriteCloser(outW),
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
- ref, err := res.SingleRef()
- if err != nil {
- return nil, err
+ outDir := filepath.Join(destDir, "out.d")
+ attrs = map[string]string{
+ "tar": "false",
}
-
- st2, err := ref.ToState()
- if err != nil {
- return nil, err
+ if exp == ExporterDocker {
+ attrs["name"] = target
}
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: exp,
+ Attrs: attrs,
+ OutputDir: outDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
- st = llb.Scratch().File(
- llb.Copy(st2, "foo", "foo2"),
- )
+ dt, err := os.ReadFile(outTar)
+ require.NoError(t, err)
+ m, err := testutil.ReadTarToMap(dt, false)
+ require.NoError(t, err)
- def, err = st.Marshal(sb.Context())
- if err != nil {
- return nil, err
- }
+ filepath.Walk(outDir, func(filename string, fi os.FileInfo, err error) error {
+ filename = strings.TrimPrefix(filename, outDir)
+ filename = strings.Trim(filename, "/")
+ if filename == "" || filename == "ingest" {
+ return nil
+ }
- return c.Solve(ctx, gateway.SolveRequest{
- Definition: def.ToPB(),
+ if fi.IsDir() {
+ require.Contains(t, m, filename+"/")
+ } else {
+ require.Contains(t, m, filename)
+ if filename == "index.json" {
+ // this file has a timestamp in it, so we can't compare
+ return nil
+ }
+ f, err := os.Open(path.Join(outDir, filename))
+ require.NoError(t, err)
+ data, err := io.ReadAll(f)
+ require.NoError(t, err)
+ require.Equal(t, m[filename].Data, data)
+ }
+ return nil
})
}
- destDir, err := ioutil.TempDir("", "buildkit")
+ checkAllReleasable(t, c, sb, true)
+}
+
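+// testSourceDateEpochLayerTimestamps checks that the timestamps recorded in the exported image are set to the SOURCE_DATE_EPOCH build argument.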
+func testSourceDateEpochLayerTimestamps(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
+
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+ }
+
+ run(`sh -c "echo -n first > foo"`)
+ run(`sh -c "echo -n second > bar"`)
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir, err := os.MkdirTemp("", "buildkit")
require.NoError(t, err)
defer os.RemoveAll(destDir)
- _, err = c.Build(sb.Context(), SolveOpt{
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+
+ tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ FrontendAttrs: map[string]string{
+ "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()),
+ },
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDir,
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
},
},
- }, "", frontend, nil)
+ }, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo2"))
+ dt, err := os.ReadFile(out)
require.NoError(t, err)
- require.Equal(t, dt, []byte("data"))
-}
-func skipDockerd(t *testing.T, sb integration.Sandbox) {
- // TODO: remove me once dockerd supports the image and exporter.
- t.Helper()
- if os.Getenv("TEST_DOCKERD") == "1" {
- t.Skip("dockerd missing a required exporter, cache exporter, or entitlement")
- }
+ tmsX, err := readImageTimestamps(dt)
+ require.NoError(t, err)
+ tms := tmsX.FromImage
+
+ require.Equal(t, len(tms), 3)
+
+ expected := tm.UTC().Format(time.RFC3339Nano)
+ require.Equal(t, expected, tms[0])
+ require.Equal(t, expected, tms[1])
+ require.Equal(t, expected, tms[2])
+
+ checkAllReleasable(t, c, sb, true)
}
-func testExporterTargetExists(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
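+// testSourceDateEpochClamp checks that SOURCE_DATE_EPOCH only clamps timestamps newer than the epoch: base image layer timestamps are preserved while the new layer and the image created time use the epoch.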
+func testSourceDateEpochClamp(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch)
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- st := llb.Image("busybox:latest")
- def, err := st.Marshal(sb.Context())
+ var bboxConfig []byte
+ _, err = c.Build(sb.Context(), SolveOpt{}, "", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ _, bboxConfig, err = c.ResolveImageConfig(ctx, "docker.io/library/busybox:latest", llb.ResolveImageConfigOpt{})
+ if err != nil {
+ return nil, err
+ }
+ return nil, nil
+ }, nil)
require.NoError(t, err)
- var mdDgst string
- res, err := c.Solve(sb.Context(), def, SolveOpt{
+ m := map[string]json.RawMessage{}
+ require.NoError(t, json.Unmarshal(bboxConfig, &m))
+ delete(m, "created")
+ bboxConfig, err = json.Marshal(m)
+ require.NoError(t, err)
+
+ busybox, err := llb.Image("busybox:latest").WithImageConfig(bboxConfig)
+ require.NoError(t, err)
+
+ def, err := busybox.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir, err := os.MkdirTemp("", "buildkit")
+ require.NoError(t, err)
+ defer os.RemoveAll(destDir)
+
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterOCI,
- Attrs: map[string]string{},
- Output: func(m map[string]string) (io.WriteCloser, error) {
- mdDgst = m[exptypes.ExporterImageDigestKey]
- return nil, nil
+ Type: ExporterOCI,
+ Attrs: map[string]string{
+ exptypes.ExporterImageConfigKey: string(bboxConfig),
},
+ Output: fixedWriteCloser(outW),
},
},
}, nil)
require.NoError(t, err)
- dgst := res.ExporterResponse[exptypes.ExporterImageDigestKey]
- require.True(t, strings.HasPrefix(dgst, "sha256:"))
- require.Equal(t, dgst, mdDgst)
+ dt, err := os.ReadFile(out)
+ require.NoError(t, err)
- require.True(t, strings.HasPrefix(res.ExporterResponse[exptypes.ExporterImageConfigDigestKey], "sha256:"))
-}
+ busyboxTmsX, err := readImageTimestamps(dt)
+ require.NoError(t, err)
+ busyboxTms := busyboxTmsX.FromImage
-func testTarExporterWithSocket(t *testing.T, sb integration.Sandbox) {
- if os.Getenv("TEST_DOCKERD") == "1" {
- t.Skip("tar exporter is temporarily broken on dockerd")
- }
+ require.True(t, len(busyboxTms) > 1)
+ bboxLayerLen := len(busyboxTms) - 1
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ tm, err := time.Parse(time.RFC3339Nano, busyboxTms[1])
require.NoError(t, err)
- defer c.Close()
- alpine := llb.Image("docker.io/library/alpine:latest")
- def, err := alpine.Run(llb.Args([]string{"sh", "-c", "nc -l -s local:/socket.sock & usleep 100000; kill %1"})).Marshal(sb.Context())
+ next := tm.Add(time.Hour).Truncate(time.Second)
+
+ st := busybox.Run(llb.Shlex("touch /foo"))
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ out = filepath.Join(destDir, "out.tar")
+ outW, err = os.Create(out)
require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{
+ FrontendAttrs: map[string]string{
+ "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", next.Unix()),
+ },
Exports: []ExportEntry{
{
- Type: ExporterTar,
- Attrs: map[string]string{},
- Output: func(m map[string]string) (io.WriteCloser, error) {
- return nopWriteCloser{ioutil.Discard}, nil
+ Type: ExporterOCI,
+ Attrs: map[string]string{
+ exptypes.ExporterImageConfigKey: string(bboxConfig),
},
+ Output: fixedWriteCloser(outW),
},
},
}, nil)
require.NoError(t, err)
-}
-
-func testTarExporterWithSocketCopy(t *testing.T, sb integration.Sandbox) {
- if os.Getenv("TEST_DOCKERD") == "1" {
- t.Skip("tar exporter is temporarily broken on dockerd")
- }
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ dt, err = os.ReadFile(out)
require.NoError(t, err)
- defer c.Close()
-
- alpine := llb.Image("docker.io/library/alpine:latest")
- state := alpine.Run(llb.Args([]string{"sh", "-c", "nc -l -s local:/root/socket.sock & usleep 100000; kill %1"})).Root()
- fa := llb.Copy(state, "/root", "/roo2", &llb.CopyInfo{})
+ tmsX, err := readImageTimestamps(dt)
+ require.NoError(t, err)
+ tms := tmsX.FromImage
- scratchCopy := llb.Scratch().File(fa)
+ require.Equal(t, len(tms), bboxLayerLen+2)
- def, err := scratchCopy.Marshal(sb.Context())
- require.NoError(t, err)
+ expected := next.UTC().Format(time.RFC3339Nano)
+ require.Equal(t, expected, tms[0])
+ require.Equal(t, busyboxTms[1], tms[1])
+ require.Equal(t, expected, tms[bboxLayerLen+1])
+ require.Equal(t, expected, tmsX.FromAnnotation)
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
}
-// moby/buildkit#1418
-func testTarExporterSymlink(t *testing.T, sb integration.Sandbox) {
+// testSourceDateEpochReset tests that SOURCE_DATE_EPOCH is reset when the exporter's source-date-epoch attribute is set to an empty value
+func testSourceDateEpochReset(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch)
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -2200,507 +2710,346 @@ func testTarExporterSymlink(t *testing.T, sb integration.Sandbox) {
st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
}
- run(`sh -c "echo -n first > foo;ln -s foo bar"`)
+ run(`sh -c "echo -n first > foo"`)
+ run(`sh -c "echo -n second > bar"`)
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- var buf bytes.Buffer
+ destDir, err := os.MkdirTemp("", "buildkit")
+ require.NoError(t, err)
+ defer os.RemoveAll(destDir)
+
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+
+ tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC)
+
_, err = c.Solve(sb.Context(), def, SolveOpt{
+ FrontendAttrs: map[string]string{
+ "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()),
+ },
Exports: []ExportEntry{
{
- Type: ExporterTar,
- Output: fixedWriteCloser(&nopWriteCloser{&buf}),
+ Type: ExporterOCI,
+ Attrs: map[string]string{"source-date-epoch": ""},
+ Output: fixedWriteCloser(outW),
},
},
}, nil)
require.NoError(t, err)
- m, err := testutil.ReadTarToMap(buf.Bytes(), false)
+ dt, err := os.ReadFile(out)
require.NoError(t, err)
- item, ok := m["foo"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("first"), item.Data)
+ tmsX, err := readImageTimestamps(dt)
+ require.NoError(t, err)
+ tms := tmsX.FromImage
- item, ok = m["bar"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeSymlink)
- require.Equal(t, "foo", item.Header.Linkname)
-}
+ require.Equal(t, len(tms), 3)
-func testBuildExportWithForeignLayer(t *testing.T, sb integration.Sandbox) {
- if os.Getenv("TEST_DOCKERD") == "1" {
- t.Skip("image exporter is missing in dockerd")
- }
+ expected := tm.UTC().Format(time.RFC3339Nano)
+ require.NotEqual(t, expected, tms[0])
+ require.NotEqual(t, expected, tms[1])
+ require.NotEqual(t, expected, tms[2])
+
+ require.Equal(t, tms[0], tms[2])
+ require.NotEqual(t, tms[2], tms[1])
+
+ checkAllReleasable(t, c, sb, true)
+}
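+
+// testSourceDateEpochLocalExporter checks that files written by the local exporter have their modification time set to SOURCE_DATE_EPOCH.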
+func testSourceDateEpochLocalExporter(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureSourceDateEpoch)
+ requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- st := llb.Image("cpuguy83/buildkit-foreign:latest")
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
+
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+ }
+
+ run(`sh -c "echo -n first > foo"`)
+ run(`sh -c "echo -n second > bar"`)
+
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- t.Run("propagate=1", func(t *testing.T) {
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
+ destDir, err := os.MkdirTemp("", "buildkit")
+ require.NoError(t, err)
+ defer os.RemoveAll(destDir)
- target := registry + "/buildkit/build/exporter/foreign:latest"
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- "prefer-nondist-layers": "true",
- },
- },
- },
- }, nil)
- require.NoError(t, err)
-
- ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
-
- resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true})
- name, desc, err := resolver.Resolve(ctx, target)
- require.NoError(t, err)
+ tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC)
- fetcher, err := resolver.Fetcher(ctx, name)
- require.NoError(t, err)
- mfst, err := images.Manifest(ctx, contentutil.FromFetcher(fetcher), desc, platforms.Any())
- require.NoError(t, err)
-
- require.Equal(t, 2, len(mfst.Layers))
- require.Equal(t, images.MediaTypeDockerSchema2LayerForeign, mfst.Layers[0].MediaType)
- require.Len(t, mfst.Layers[0].URLs, 1)
- require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[1].MediaType)
-
- rc, err := fetcher.Fetch(ctx, ocispecs.Descriptor{Digest: mfst.Layers[0].Digest, Size: mfst.Layers[0].Size})
- require.NoError(t, err)
- defer rc.Close()
-
- // `Fetch` doesn't error (in the docker resolver), it just returns a reader immediately and does not make a request.
- // The request is only made when we attempt to read from the reader.
- buf := make([]byte, 1)
- _, err = rc.Read(buf)
- require.Truef(t, ctderrdefs.IsNotFound(err), "expected error for blob that should not be in registry: %s, %v", mfst.Layers[0].Digest, err)
- })
- t.Run("propagate=0", func(t *testing.T) {
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
- target := registry + "/buildkit/build/exporter/noforeign:latest"
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- },
- },
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ FrontendAttrs: map[string]string{
+ "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()),
+ },
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
},
- }, nil)
- require.NoError(t, err)
-
- ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
-
- resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true})
- name, desc, err := resolver.Resolve(ctx, target)
- require.NoError(t, err)
-
- fetcher, err := resolver.Fetcher(ctx, name)
- require.NoError(t, err)
-
- mfst, err := images.Manifest(ctx, contentutil.FromFetcher(fetcher), desc, platforms.Any())
- require.NoError(t, err)
+ },
+ }, nil)
+ require.NoError(t, err)
- require.Equal(t, 2, len(mfst.Layers))
- require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType)
- require.Len(t, mfst.Layers[0].URLs, 0)
- require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[1].MediaType)
+ fi, err := os.Stat(filepath.Join(destDir, "foo"))
+ require.NoError(t, err)
+ require.Equal(t, fi.ModTime().Format(time.RFC3339), tm.UTC().Format(time.RFC3339))
- rc, err := fetcher.Fetch(ctx, ocispecs.Descriptor{Digest: mfst.Layers[0].Digest, Size: mfst.Layers[0].Size})
- require.NoError(t, err)
- defer rc.Close()
+ fi, err = os.Stat(filepath.Join(destDir, "bar"))
+ require.NoError(t, err)
+ require.Equal(t, fi.ModTime().Format(time.RFC3339), tm.UTC().Format(time.RFC3339))
- // `Fetch` doesn't error (in the docker resolver), it just returns a reader immediately and does not make a request.
- // The request is only made when we attempt to read from the reader.
- buf := make([]byte, 1)
- _, err = rc.Read(buf)
- require.NoError(t, err)
- })
+ checkAllReleasable(t, c, sb, true)
}
-func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
- if os.Getenv("TEST_DOCKERD") == "1" {
- t.Skip("image exporter is missing in dockerd")
- }
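+// testSourceDateEpochTarExporter checks that entries written by the tar exporter have their modification time set to SOURCE_DATE_EPOCH.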
+func testSourceDateEpochTarExporter(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureSourceDateEpoch)
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
busybox := llb.Image("busybox:latest")
- cmd := `sh -e -c "echo -n uncompressed > data"`
-
st := llb.Scratch()
- st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+ }
+
+ run(`sh -c "echo -n first > foo"`)
+ run(`sh -c "echo -n second > bar"`)
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
+ destDir, err := os.MkdirTemp("", "buildkit")
require.NoError(t, err)
+ defer os.RemoveAll(destDir)
- target := registry + "/buildkit/build/exporter:withnocompressed"
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+
+ tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC)
_, err = c.Solve(sb.Context(), def, SolveOpt{
+ FrontendAttrs: map[string]string{
+ "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()),
+ },
Exports: []ExportEntry{
{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- "compression": "uncompressed",
- },
+ Type: ExporterTar,
+ Output: fixedWriteCloser(outW),
},
},
}, nil)
require.NoError(t, err)
- ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
- cdAddress := sb.ContainerdAddress()
- var client *containerd.Client
- if cdAddress != "" {
- client, err = newContainerd(cdAddress)
- require.NoError(t, err)
- defer client.Close()
+ dt, err := os.ReadFile(out)
+ require.NoError(t, err)
- img, err := client.GetImage(ctx, target)
- require.NoError(t, err)
- mfst, err := images.Manifest(ctx, client.ContentStore(), img.Target(), nil)
- require.NoError(t, err)
- require.Equal(t, 1, len(mfst.Layers))
- require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType)
- }
+ m, err := testutil.ReadTarToMap(dt, false)
+ require.NoError(t, err)
- // new layer with gzip compression
- targetImg := llb.Image(target)
- cmd = `sh -e -c "echo -n gzip > data"`
- st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", targetImg)
+ require.Equal(t, len(m), 2)
- def, err = st.Marshal(sb.Context())
- require.NoError(t, err)
+ require.Equal(t, tm.Format(time.RFC3339), m["foo"].Header.ModTime.Format(time.RFC3339))
+ require.Equal(t, tm.Format(time.RFC3339), m["bar"].Header.ModTime.Format(time.RFC3339))
- compressedTarget := registry + "/buildkit/build/exporter:withcompressed"
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": compressedTarget,
- "push": "true",
- },
- },
- },
- }, nil)
+ checkAllReleasable(t, c, sb, true)
+}
+func testFrontendMetadataReturn(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
+ defer c.Close()
- allCompressedTarget := registry + "/buildkit/build/exporter:withallcompressed"
- _, err = c.Solve(context.TODO(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": allCompressedTarget,
- "push": "true",
- "compression": "gzip",
- "force-compression": "true",
- },
- },
- },
- }, nil)
- require.NoError(t, err)
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+ res.AddMeta("frontend.returned", []byte("true"))
+ res.AddMeta("not-frontend.not-returned", []byte("false"))
+ res.AddMeta("frontendnot.returned.either", []byte("false"))
+ return res, nil
+ }
- if cdAddress == "" {
- t.Skip("rest of test requires containerd worker")
+ var exports []ExportEntry
+ if integration.IsTestDockerdMoby(sb) {
+ exports = []ExportEntry{{
+ Type: "moby",
+ Attrs: map[string]string{
+ "name": "reg.dummy:5000/buildkit/test:latest",
+ },
+ }}
+ } else {
+ exports = []ExportEntry{{
+ Type: ExporterOCI,
+ Attrs: map[string]string{},
+ Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+ }}
}
- err = client.ImageService().Delete(ctx, target, images.SynchronousDelete())
- require.NoError(t, err)
- err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
- require.NoError(t, err)
- err = client.ImageService().Delete(ctx, allCompressedTarget, images.SynchronousDelete())
+ res, err := c.Build(sb.Context(), SolveOpt{
+ Exports: exports,
+ }, "", frontend, nil)
require.NoError(t, err)
-
+ require.Contains(t, res.ExporterResponse, "frontend.returned")
+ require.Equal(t, res.ExporterResponse["frontend.returned"], "true")
+ require.NotContains(t, res.ExporterResponse, "not-frontend.not-returned")
+ require.NotContains(t, res.ExporterResponse, "frontendnot.returned.either")
checkAllReleasable(t, c, sb, true)
+}
- // check if the new layer is compressed with compression option
- img, err := client.Pull(ctx, compressedTarget)
+func testFrontendUseSolveResults(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
+ defer c.Close()
- dt, err := content.ReadBlob(ctx, img.ContentStore(), img.Target())
- require.NoError(t, err)
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ st := llb.Scratch().File(
+ llb.Mkfile("foo", 0600, []byte("data")),
+ )
- var mfst = struct {
- MediaType string `json:"mediaType,omitempty"`
- ocispecs.Manifest
- }{}
+ def, err := st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
- err = json.Unmarshal(dt, &mfst)
- require.NoError(t, err)
- require.Equal(t, 2, len(mfst.Layers))
- require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType)
- require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType)
+ res, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
- dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest})
- require.NoError(t, err)
+ ref, err := res.SingleRef()
+ if err != nil {
+ return nil, err
+ }
- m, err := testutil.ReadTarToMap(dt, false)
- require.NoError(t, err)
+ st2, err := ref.ToState()
+ if err != nil {
+ return nil, err
+ }
- item, ok := m["data"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("uncompressed"), item.Data)
+ st = llb.Scratch().File(
+ llb.Copy(st2, "foo", "foo2"),
+ )
- dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[1].Digest})
- require.NoError(t, err)
-
- m, err = testutil.ReadTarToMap(dt, true)
- require.NoError(t, err)
-
- item, ok = m["data"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("gzip"), item.Data)
-
- err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
- require.NoError(t, err)
-
- checkAllReleasable(t, c, sb, true)
-
- // check if all layers are compressed with force-compressoin option
- img, err = client.Pull(ctx, allCompressedTarget)
- require.NoError(t, err)
-
- dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target())
- require.NoError(t, err)
-
- mfst = struct {
- MediaType string `json:"mediaType,omitempty"`
- ocispecs.Manifest
- }{}
-
- err = json.Unmarshal(dt, &mfst)
- require.NoError(t, err)
- require.Equal(t, 2, len(mfst.Layers))
- require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType)
- require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType)
-
- dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest})
- require.NoError(t, err)
+ def, err = st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
- m, err = testutil.ReadTarToMap(dt, true)
- require.NoError(t, err)
+ return c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ }
- item, ok = m["data"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("uncompressed"), item.Data)
+ destDir := t.TempDir()
- dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[1].Digest})
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, "", frontend, nil)
require.NoError(t, err)
- m, err = testutil.ReadTarToMap(dt, true)
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo2"))
require.NoError(t, err)
-
- item, ok = m["data"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("gzip"), item.Data)
+ require.Equal(t, dt, []byte("data"))
}
-func testBuildExportZstd(t *testing.T, sb integration.Sandbox) {
+func testExporterTargetExists(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
+ requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- busybox := llb.Image("busybox:latest")
- cmd := `sh -e -c "echo -n zstd > data"`
-
- st := llb.Scratch()
- st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
-
+ st := llb.Image("busybox:latest")
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{
+ var mdDgst string
+ res, err := c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterOCI,
- Output: fixedWriteCloser(outW),
- Attrs: map[string]string{
- "compression": "zstd",
+ Type: ExporterOCI,
+ Attrs: map[string]string{},
+ Output: func(m map[string]string) (io.WriteCloser, error) {
+ mdDgst = m[exptypes.ExporterImageDigestKey]
+ return nil, nil
},
},
},
- // compression option should work even with inline cache exports
- CacheExports: []CacheOptionsEntry{
- {
- Type: "inline",
- },
- },
}, nil)
require.NoError(t, err)
+ dgst := res.ExporterResponse[exptypes.ExporterImageDigestKey]
- dt, err := ioutil.ReadFile(out)
- require.NoError(t, err)
-
- m, err := testutil.ReadTarToMap(dt, false)
- require.NoError(t, err)
+ require.True(t, strings.HasPrefix(dgst, "sha256:"))
+ require.Equal(t, dgst, mdDgst)
- var index ocispecs.Index
- err = json.Unmarshal(m["index.json"].Data, &index)
- require.NoError(t, err)
+ require.True(t, strings.HasPrefix(res.ExporterResponse[exptypes.ExporterImageConfigDigestKey], "sha256:"))
+}
- var mfst ocispecs.Manifest
- err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
+func testTarExporterWithSocket(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
+ defer c.Close()
- lastLayer := mfst.Layers[len(mfst.Layers)-1]
- require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType)
-
- zstdLayerDigest := lastLayer.Digest.Hex()
- require.Equal(t, m["blobs/sha256/"+zstdLayerDigest].Data[:4], []byte{0x28, 0xb5, 0x2f, 0xfd})
-
- // repeat without oci mediatype
- outW, err = os.Create(out)
+ alpine := llb.Image("docker.io/library/alpine:latest")
+ def, err := alpine.Run(llb.Args([]string{"sh", "-c", "nc -l -s local:/socket.sock & usleep 100000; kill %1"})).Marshal(sb.Context())
require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterOCI,
- Output: fixedWriteCloser(outW),
- Attrs: map[string]string{
- "compression": "zstd",
- "oci-mediatypes": "false",
+ Type: ExporterTar,
+ Attrs: map[string]string{},
+ Output: func(m map[string]string) (io.WriteCloser, error) {
+ return nopWriteCloser{io.Discard}, nil
},
},
},
}, nil)
require.NoError(t, err)
-
- dt, err = ioutil.ReadFile(out)
- require.NoError(t, err)
-
- m, err = testutil.ReadTarToMap(dt, false)
- require.NoError(t, err)
-
- err = json.Unmarshal(m["index.json"].Data, &index)
- require.NoError(t, err)
-
- err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
- require.NoError(t, err)
-
- lastLayer = mfst.Layers[len(mfst.Layers)-1]
- require.Equal(t, images.MediaTypeDockerSchema2Layer+".zstd", lastLayer.MediaType)
-
- require.Equal(t, lastLayer.Digest.Hex(), zstdLayerDigest)
}
-func testPullZstdImage(t *testing.T, sb integration.Sandbox) {
+func testTarExporterWithSocketCopy(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- busybox := llb.Image("busybox:latest")
- cmd := `sh -e -c "echo -n zstd > data"`
-
- st := llb.Scratch()
- st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
-
- def, err := st.Marshal(sb.Context())
- require.NoError(t, err)
-
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
-
- target := registry + "/buildkit/build/exporter:zstd"
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- "compression": "zstd",
-
- // containerd applier supports only zstd with oci-mediatype.
- "oci-mediatypes": "true",
- },
- },
- },
- }, nil)
- require.NoError(t, err)
-
- if sb.Name() == "containerd-1.4" {
- // containerd 1.4 doesn't support zstd compression
- return
- }
-
- ensurePruneAll(t, c, sb)
-
- st = llb.Scratch().File(llb.Copy(llb.Image(target), "/data", "/zdata"))
+ alpine := llb.Image("docker.io/library/alpine:latest")
+ state := alpine.Run(llb.Args([]string{"sh", "-c", "nc -l -s local:/root/socket.sock & usleep 100000; kill %1"})).Root()
- def, err = st.Marshal(sb.Context())
- require.NoError(t, err)
+ fa := llb.Copy(state, "/root", "/roo2", &llb.CopyInfo{})
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ scratchCopy := llb.Scratch().File(fa)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- },
- },
- }, nil)
+ def, err := scratchCopy.Marshal(sb.Context())
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "zdata"))
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
require.NoError(t, err)
- require.Equal(t, dt, []byte("zstd"))
}
-func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+
+// moby/buildkit#1418
+func testTarExporterSymlink(t *testing.T, sb integration.Sandbox) {
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -2713,130 +3062,253 @@ func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
}
- run(`sh -e -c "mkdir -p foo/sub; echo -n first > foo/sub/bar; chmod 0741 foo;"`)
- run(`true`) // this doesn't create a layer
- run(`sh -c "echo -n second > foo/sub/baz"`)
+ run(`sh -c "echo -n first > foo;ln -s foo bar"`)
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
-
- target := registry + "/buildkit/testpush:latest"
-
+ var buf bytes.Buffer
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- },
+ Type: ExporterTar,
+ Output: fixedWriteCloser(&nopWriteCloser{&buf}),
},
},
}, nil)
require.NoError(t, err)
- // test existence of the image with next build
- firstBuild := llb.Image(target)
-
- def, err = firstBuild.Marshal(sb.Context())
+ m, err := testutil.ReadTarToMap(buf.Bytes(), false)
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ item, ok := m["foo"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+ require.Equal(t, []byte("first"), item.Data)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- },
- },
- }, nil)
- require.NoError(t, err)
+ item, ok = m["bar"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeSymlink)
+ require.Equal(t, "foo", item.Header.Linkname)
+}
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo/sub/bar"))
+func testBuildExportWithForeignLayer(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
- require.Equal(t, dt, []byte("first"))
+ defer c.Close()
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo/sub/baz"))
+ st := llb.Image("cpuguy83/buildkit-foreign:latest")
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- require.Equal(t, dt, []byte("second"))
- fi, err := os.Stat(filepath.Join(destDir, "foo"))
- require.NoError(t, err)
- require.Equal(t, 0741, int(fi.Mode()&0777))
+ t.Run("propagate=1", func(t *testing.T) {
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
- checkAllReleasable(t, c, sb, false)
+ target := registry + "/buildkit/build/exporter/foreign:latest"
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "prefer-nondist-layers": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
- // examine contents of exported tars (requires containerd)
- cdAddress := sb.ContainerdAddress()
- if cdAddress == "" {
- t.Skip("rest of test requires containerd worker")
- }
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
- // TODO: make public pull helper function so this can be checked for standalone as well
+ resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true})
+ name, desc, err := resolver.Resolve(ctx, target)
+ require.NoError(t, err)
- client, err := newContainerd(cdAddress)
+ fetcher, err := resolver.Fetcher(ctx, name)
+ require.NoError(t, err)
+ mfst, err := images.Manifest(ctx, contentutil.FromFetcher(fetcher), desc, platforms.Any())
+ require.NoError(t, err)
+
+ require.Equal(t, 2, len(mfst.Layers))
+ require.Equal(t, images.MediaTypeDockerSchema2LayerForeign, mfst.Layers[0].MediaType)
+ require.Len(t, mfst.Layers[0].URLs, 1)
+ require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[1].MediaType)
+
+ rc, err := fetcher.Fetch(ctx, ocispecs.Descriptor{Digest: mfst.Layers[0].Digest, Size: mfst.Layers[0].Size})
+ require.NoError(t, err)
+ defer rc.Close()
+
+ // `Fetch` doesn't error (in the docker resolver); it just returns a reader immediately and does not make a request.
+ // The request is only made when we attempt to read from the reader.
+ buf := make([]byte, 1)
+ _, err = rc.Read(buf)
+ require.Truef(t, ctderrdefs.IsNotFound(err), "expected error for blob that should not be in registry: %s, %v", mfst.Layers[0].Digest, err)
+ })
+ t.Run("propagate=0", func(t *testing.T) {
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+ target := registry + "/buildkit/build/exporter/noforeign:latest"
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+
+ resolver := docker.NewResolver(docker.ResolverOptions{PlainHTTP: true})
+ name, desc, err := resolver.Resolve(ctx, target)
+ require.NoError(t, err)
+
+ fetcher, err := resolver.Fetcher(ctx, name)
+ require.NoError(t, err)
+
+ mfst, err := images.Manifest(ctx, contentutil.FromFetcher(fetcher), desc, platforms.Any())
+ require.NoError(t, err)
+
+ require.Equal(t, 2, len(mfst.Layers))
+ require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType)
+ require.Len(t, mfst.Layers[0].URLs, 0)
+ require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[1].MediaType)
+
+ rc, err := fetcher.Fetch(ctx, ocispecs.Descriptor{Digest: mfst.Layers[0].Digest, Size: mfst.Layers[0].Size})
+ require.NoError(t, err)
+ defer rc.Close()
+
+ // `Fetch` doesn't error (in the docker resolver); it just returns a reader immediately and does not make a request.
+ // The request is only made when we attempt to read from the reader.
+ buf := make([]byte, 1)
+ _, err = rc.Read(buf)
+ require.NoError(t, err)
+ })
+}
+
+func testBuildExportWithUncompressed(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
- defer client.Close()
+ defer c.Close()
- ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+ busybox := llb.Image("busybox:latest")
+ cmd := `sh -e -c "echo -n uncompressed > data"`
- // check image in containerd
- _, err = client.ImageService().Get(ctx, target)
+ st := llb.Scratch()
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- // deleting image should release all content
- err = client.ImageService().Delete(ctx, target, images.SynchronousDelete())
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+ target := registry + "/buildkit/build/exporter:withnocompressed"
- img, err := client.Pull(ctx, target)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "compression": "uncompressed",
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- desc, err := img.Config(ctx)
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+ cdAddress := sb.ContainerdAddress()
+ var client *containerd.Client
+ if cdAddress != "" {
+ client, err = newContainerd(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+
+ img, err := client.GetImage(ctx, target)
+ require.NoError(t, err)
+ mfst, err := images.Manifest(ctx, client.ContentStore(), img.Target(), nil)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(mfst.Layers))
+ require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType)
+ }
+
+ // new layer with gzip compression
+ targetImg := llb.Image(target)
+ cmd = `sh -e -c "echo -n gzip > data"`
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", targetImg)
+
+ def, err = st.Marshal(sb.Context())
require.NoError(t, err)
- dt, err = content.ReadBlob(ctx, img.ContentStore(), desc)
+ compressedTarget := registry + "/buildkit/build/exporter:withcompressed"
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": compressedTarget,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- var ociimg ocispecs.Image
- err = json.Unmarshal(dt, &ociimg)
+ allCompressedTarget := registry + "/buildkit/build/exporter:withallcompressed"
+ _, err = c.Solve(context.TODO(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": allCompressedTarget,
+ "push": "true",
+ "compression": "gzip",
+ "force-compression": "true",
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- require.NotEqual(t, "", ociimg.OS)
- require.NotEqual(t, "", ociimg.Architecture)
- require.NotEqual(t, "", ociimg.Config.WorkingDir)
- require.Equal(t, "layers", ociimg.RootFS.Type)
- require.Equal(t, 3, len(ociimg.RootFS.DiffIDs))
- require.NotNil(t, ociimg.Created)
- require.True(t, time.Since(*ociimg.Created) < 2*time.Minute)
- require.Condition(t, func() bool {
- for _, env := range ociimg.Config.Env {
- if strings.HasPrefix(env, "PATH=") {
- return true
- }
- }
- return false
- })
+ if cdAddress == "" {
+ t.Skip("rest of test requires containerd worker")
+ }
- require.Equal(t, 3, len(ociimg.History))
- require.Contains(t, ociimg.History[0].CreatedBy, "foo/sub/bar")
- require.Contains(t, ociimg.History[1].CreatedBy, "true")
- require.Contains(t, ociimg.History[2].CreatedBy, "foo/sub/baz")
- require.False(t, ociimg.History[0].EmptyLayer)
- require.False(t, ociimg.History[1].EmptyLayer)
- require.False(t, ociimg.History[2].EmptyLayer)
+ err = client.ImageService().Delete(ctx, target, images.SynchronousDelete())
+ require.NoError(t, err)
+ err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
+ require.NoError(t, err)
+ err = client.ImageService().Delete(ctx, allCompressedTarget, images.SynchronousDelete())
+ require.NoError(t, err)
- dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target())
+ checkAllReleasable(t, c, sb, true)
+
+ // check if the new layer is compressed with compression option
+ img, err := client.Pull(ctx, compressedTarget)
+ require.NoError(t, err)
+
+ dt, err := content.ReadBlob(ctx, img.ContentStore(), img.Target())
require.NoError(t, err)
var mfst = struct {
@@ -2846,947 +3318,625 @@ func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
err = json.Unmarshal(dt, &mfst)
require.NoError(t, err)
-
- require.Equal(t, images.MediaTypeDockerSchema2Manifest, mfst.MediaType)
- require.Equal(t, 3, len(mfst.Layers))
- require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType)
+ require.Equal(t, 2, len(mfst.Layers))
+ require.Equal(t, images.MediaTypeDockerSchema2Layer, mfst.Layers[0].MediaType)
require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType)
dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest})
require.NoError(t, err)
- m, err := testutil.ReadTarToMap(dt, true)
+ m, err := testutil.ReadTarToMap(dt, false)
require.NoError(t, err)
- item, ok := m["foo/"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
- require.Equal(t, 0741, int(item.Header.Mode&0777))
-
- item, ok = m["foo/sub/"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
-
- item, ok = m["foo/sub/bar"]
+ item, ok := m["data"]
require.True(t, ok)
require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("first"), item.Data)
-
- _, ok = m["foo/sub/baz"]
- require.False(t, ok)
+ require.Equal(t, []byte("uncompressed"), item.Data)
- dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[2].Digest})
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[1].Digest})
require.NoError(t, err)
m, err = testutil.ReadTarToMap(dt, true)
require.NoError(t, err)
- item, ok = m["foo/sub/baz"]
+ item, ok = m["data"]
require.True(t, ok)
require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
- require.Equal(t, []byte("second"), item.Data)
+ require.Equal(t, []byte("gzip"), item.Data)
- item, ok = m["foo/"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
- require.Equal(t, 0741, int(item.Header.Mode&0777))
+ err = client.ImageService().Delete(ctx, compressedTarget, images.SynchronousDelete())
+ require.NoError(t, err)
- item, ok = m["foo/sub/"]
- require.True(t, ok)
- require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
+ checkAllReleasable(t, c, sb, true)
- _, ok = m["foo/sub/bar"]
- require.False(t, ok)
-}
+ // check if all layers are compressed with the force-compression option
+ img, err = client.Pull(ctx, allCompressedTarget)
+ require.NoError(t, err)
-func testStargzLazyRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- requiresLinux(t)
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target())
+ require.NoError(t, err)
- cdAddress := sb.ContainerdAddress()
- if cdAddress == "" || sb.Snapshotter() != "stargz" {
- t.Skip("test requires containerd worker with stargz snapshotter")
- }
+ mfst = struct {
+ MediaType string `json:"mediaType,omitempty"`
+ ocispecs.Manifest
+ }{}
- client, err := newContainerd(cdAddress)
+ err = json.Unmarshal(dt, &mfst)
require.NoError(t, err)
- defer client.Close()
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
+ require.Equal(t, 2, len(mfst.Layers))
+ require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType)
+ require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType)
+
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest})
require.NoError(t, err)
- var (
- imageService = client.ImageService()
- contentStore = client.ContentStore()
- ctx = namespaces.WithNamespace(sb.Context(), "buildkit")
- )
+ m, err = testutil.ReadTarToMap(dt, true)
+ require.NoError(t, err)
+
+ item, ok = m["data"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+ require.Equal(t, []byte("uncompressed"), item.Data)
+
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[1].Digest})
+ require.NoError(t, err)
+
+ m, err = testutil.ReadTarToMap(dt, true)
+ require.NoError(t, err)
+
+ item, ok = m["data"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+ require.Equal(t, []byte("gzip"), item.Data)
+}
+func testBuildExportZstd(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ busybox := llb.Image("busybox:latest")
+ cmd := `sh -e -c "echo -n zstd > data"`
- // Prepare stargz registry cache
- orgImage := "docker.io/library/alpine:latest"
- sgzCache := registry + "/stargz/alpinecache:" + identity.NewID()
- baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/foo"}))
- def, err := baseDef.Marshal(sb.Context())
+ st := llb.Scratch()
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
+
+ destDir := t.TempDir()
+
out := filepath.Join(destDir, "out.tar")
outW, err := os.Create(out)
require.NoError(t, err)
+
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterOCI,
Output: fixedWriteCloser(outW),
- },
- },
- CacheExports: []CacheOptionsEntry{
- {
- Type: "registry",
- Attrs: map[string]string{
- "ref": sgzCache,
- "compression": "estargz",
- "oci-mediatypes": "true",
- "force-compression": "true",
- },
- },
- },
- }, nil)
- require.NoError(t, err)
-
- // clear all local state out
- ensurePruneAll(t, c, sb)
-
- // stargz layers should be lazy even for executing something on them
- def, err = baseDef.
- Run(llb.Args([]string{"/bin/touch", "/bar"})).
- Marshal(sb.Context())
- require.NoError(t, err)
- target := registry + "/buildkit/testlazyimage:" + identity.NewID()
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
Attrs: map[string]string{
- "name": target,
- "push": "true",
+ "compression": "zstd",
},
},
},
- CacheImports: []CacheOptionsEntry{
+ // compression option should work even with inline cache exports
+ CacheExports: []CacheOptionsEntry{
{
- Type: "registry",
- Attrs: map[string]string{
- "ref": sgzCache,
- },
+ Type: "inline",
},
},
}, nil)
require.NoError(t, err)
- img, err := imageService.Get(ctx, target)
+ dt, err := os.ReadFile(out)
require.NoError(t, err)
- manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
+ m, err := testutil.ReadTarToMap(dt, false)
require.NoError(t, err)
- // Check if image layers are lazy.
- // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer.
- var sgzLayers []ocispecs.Descriptor
- for i, layer := range manifest.Layers[:len(manifest.Layers)-1] {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v on layer %+v (%d)", err, layer, i)
- sgzLayers = append(sgzLayers, layer)
- }
- require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull")
-
- // The topmost(last) layer created by `Run` shouldn't be lazy
- _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest)
+ var index ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &index)
require.NoError(t, err)
- // clear all local state out
- err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ var mfst ocispecs.Manifest
+ err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
- // stargz layers should be exportable
- out = filepath.Join(destDir, "out2.tar")
+ lastLayer := mfst.Layers[len(mfst.Layers)-1]
+ require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType)
+
+ zstdLayerDigest := lastLayer.Digest.Hex()
+ require.Equal(t, m["blobs/sha256/"+zstdLayerDigest].Data[:4], []byte{0x28, 0xb5, 0x2f, 0xfd})
+
+ // repeat without oci mediatype
outW, err = os.Create(out)
require.NoError(t, err)
+
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterOCI,
Output: fixedWriteCloser(outW),
- },
- },
- CacheImports: []CacheOptionsEntry{
- {
- Type: "registry",
Attrs: map[string]string{
- "ref": sgzCache,
+ "compression": "zstd",
+ "oci-mediatypes": "false",
},
},
},
}, nil)
require.NoError(t, err)
- // Check if image layers are un-lazied
- for _, layer := range sgzLayers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.NoError(t, err)
- }
+ dt, err = os.ReadFile(out)
+ require.NoError(t, err)
- ensurePruneAll(t, c, sb)
+ m, err = testutil.ReadTarToMap(dt, false)
+ require.NoError(t, err)
+
+ err = json.Unmarshal(m["index.json"].Data, &index)
+ require.NoError(t, err)
+
+ err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
+ require.NoError(t, err)
+
+ lastLayer = mfst.Layers[len(mfst.Layers)-1]
+ require.Equal(t, images.MediaTypeDockerSchema2Layer+".zstd", lastLayer.MediaType)
+
+ require.Equal(t, lastLayer.Digest.Hex(), zstdLayerDigest)
}
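+
+// testPullZstdImage pushes an image with zstd-compressed layers to the local
+// registry and verifies that it can be pulled again and used as a source in a
+// following build.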
-func testStargzLazyInlineCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- requiresLinux(t)
+func testPullZstdImage(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
- cdAddress := sb.ContainerdAddress()
- if cdAddress == "" || sb.Snapshotter() != "stargz" {
- t.Skip("test requires containerd worker with stargz snapshotter")
- }
+ busybox := llb.Image("busybox:latest")
+ cmd := `sh -e -c "echo -n zstd > data"`
- client, err := newContainerd(cdAddress)
+ st := llb.Scratch()
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- defer client.Close()
+
registry, err := sb.NewRegistry()
if errors.Is(err, integration.ErrRequirements) {
t.Skip(err.Error())
}
require.NoError(t, err)
- var (
- imageService = client.ImageService()
- contentStore = client.ContentStore()
- ctx = namespaces.WithNamespace(sb.Context(), "buildkit")
- )
-
- c, err := New(sb.Context(), sb.Address())
- require.NoError(t, err)
- defer c.Close()
+ target := registry + "/buildkit/build/exporter:zstd"
- // Prepare stargz inline cache
- orgImage := "docker.io/library/alpine:latest"
- sgzImage := registry + "/stargz/alpine:" + identity.NewID()
- baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/foo"}))
- def, err := baseDef.Marshal(sb.Context())
- require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterImage,
Attrs: map[string]string{
- "name": sgzImage,
- "push": "true",
- "compression": "estargz",
- "oci-mediatypes": "true",
- "force-compression": "true",
+ "name": target,
+ "push": "true",
+ "compression": "zstd",
+
+ // containerd applier supports only zstd with oci-mediatype.
+ "oci-mediatypes": "true",
},
},
},
- CacheExports: []CacheOptionsEntry{
- {
- Type: "inline",
- },
- },
}, nil)
require.NoError(t, err)
- // clear all local state out
- err = imageService.Delete(ctx, sgzImage, images.SynchronousDelete())
- require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+ ensurePruneAll(t, c, sb)
- // stargz layers should be lazy even for executing something on them
- def, err = baseDef.
- Run(llb.Args([]string{"/bin/touch", "/bar"})).
- Marshal(sb.Context())
- require.NoError(t, err)
- target := registry + "/buildkit/testlazyimage:" + identity.NewID()
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- "oci-mediatypes": "true",
- "compression": "estargz",
- },
- },
- },
- CacheExports: []CacheOptionsEntry{
- {
- Type: "inline",
- },
- },
- CacheImports: []CacheOptionsEntry{
- {
- Type: "registry",
- Attrs: map[string]string{
- "ref": sgzImage,
- },
- },
- },
- }, nil)
- require.NoError(t, err)
-
- img, err := imageService.Get(ctx, target)
- require.NoError(t, err)
-
- manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
- require.NoError(t, err)
-
- // Check if image layers are lazy.
- // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer.
- var sgzLayers []ocispecs.Descriptor
- for i, layer := range manifest.Layers[:len(manifest.Layers)-1] {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v on layer %+v (%d)", err, layer, i)
- sgzLayers = append(sgzLayers, layer)
- }
- require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull")
+ st = llb.Scratch().File(llb.Copy(llb.Image(target), "/data", "/zdata"))
- // The topmost(last) layer created by `Run` shouldn't be lazy
- _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest)
+ def, err = st.Marshal(sb.Context())
require.NoError(t, err)
- // clear all local state out
- err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
- require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+ destDir := t.TempDir()
- // stargz layers should be exportable
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterOCI,
- Output: fixedWriteCloser(outW),
- },
- },
- CacheImports: []CacheOptionsEntry{
- {
- Type: "registry",
- Attrs: map[string]string{
- "ref": sgzImage,
- },
+ Type: ExporterLocal,
+ OutputDir: destDir,
},
},
}, nil)
require.NoError(t, err)
- // Check if image layers are un-lazied
- for _, layer := range sgzLayers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.NoError(t, err)
- }
-
- ensurePruneAll(t, c, sb)
+ dt, err := os.ReadFile(filepath.Join(destDir, "zdata"))
+ require.NoError(t, err)
+ require.Equal(t, dt, []byte("zstd"))
}
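+
+// testBuildPushAndValidate builds a small multi-layer image, pushes it to the
+// local registry and validates the exported files, the image config, the
+// history entries, and the contents of the individual layer tarballs.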
-
-func testStargzLazyPull(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+func testBuildPushAndValidate(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
- cdAddress := sb.ContainerdAddress()
- if cdAddress == "" || sb.Snapshotter() != "stargz" {
- t.Skip("test requires containerd worker with stargz snapshotter")
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
+
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
}
- client, err := newContainerd(cdAddress)
+ run(`sh -e -c "mkdir -p foo/sub; echo -n first > foo/sub/bar; chmod 0741 foo;"`)
+	run(`true`) // no filesystem changes, but still recorded as a separate layer (see the layer checks below)
+ run(`sh -c "echo -n second > foo/sub/baz"`)
+
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- defer client.Close()
+
registry, err := sb.NewRegistry()
if errors.Is(err, integration.ErrRequirements) {
t.Skip(err.Error())
}
require.NoError(t, err)
- var (
- imageService = client.ImageService()
- contentStore = client.ContentStore()
- ctx = namespaces.WithNamespace(sb.Context(), "buildkit")
- )
-
- c, err := New(sb.Context(), sb.Address())
- require.NoError(t, err)
- defer c.Close()
+ target := registry + "/buildkit/testpush:latest"
- // Prepare stargz image
- orgImage := "docker.io/library/alpine:latest"
- sgzImage := registry + "/stargz/alpine:" + identity.NewID()
- def, err := llb.Image(orgImage).Marshal(sb.Context())
- require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterImage,
Attrs: map[string]string{
- "name": sgzImage,
- "push": "true",
- "compression": "estargz",
- "oci-mediatypes": "true",
- "force-compression": "true",
+ "name": target,
+ "push": "true",
},
},
},
}, nil)
require.NoError(t, err)
- // clear all local state out
- err = imageService.Delete(ctx, sgzImage, images.SynchronousDelete())
- require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+	// test that the pushed image exists by using it in the next build
+ firstBuild := llb.Image(target)
- // stargz layers should be lazy even for executing something on them
- def, err = llb.Image(sgzImage).
- Run(llb.Args([]string{"/bin/touch", "/foo"})).
- Marshal(sb.Context())
+ def, err = firstBuild.Marshal(sb.Context())
require.NoError(t, err)
- target := registry + "/buildkit/testlazyimage:" + identity.NewID()
+
+ destDir := t.TempDir()
+
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- "oci-mediatypes": "true",
- },
+ Type: ExporterLocal,
+ OutputDir: destDir,
},
},
}, nil)
require.NoError(t, err)
- img, err := imageService.Get(ctx, target)
- require.NoError(t, err)
-
- manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
- require.NoError(t, err)
-
- // Check if image layers are lazy.
- // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer.
- var sgzLayers []ocispecs.Descriptor
- for _, layer := range manifest.Layers[:len(manifest.Layers)-1] {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
- sgzLayers = append(sgzLayers, layer)
- }
- require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull")
-
- // The topmost(last) layer created by `Run` shouldn't be lazy
- _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest)
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo/sub/bar"))
require.NoError(t, err)
+ require.Equal(t, dt, []byte("first"))
- // clear all local state out
- err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ dt, err = os.ReadFile(filepath.Join(destDir, "foo/sub/baz"))
require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+ require.Equal(t, dt, []byte("second"))
- // stargz layers should be exportable
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterOCI,
- Output: fixedWriteCloser(outW),
- },
- },
- }, nil)
+ fi, err := os.Stat(filepath.Join(destDir, "foo"))
require.NoError(t, err)
+ require.Equal(t, 0741, int(fi.Mode()&0777))
- // Check if image layers are un-lazied
- for _, layer := range sgzLayers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.NoError(t, err)
- }
-
- ensurePruneAll(t, c, sb)
-}
-
-func testLazyImagePush(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- requiresLinux(t)
+ checkAllReleasable(t, c, sb, false)
+ // examine contents of exported tars (requires containerd)
cdAddress := sb.ContainerdAddress()
if cdAddress == "" {
- t.Skip("test requires containerd worker")
+ t.Skip("rest of test requires containerd worker")
}
+ // TODO: make public pull helper function so this can be checked for standalone as well
+
client, err := newContainerd(cdAddress)
require.NoError(t, err)
defer client.Close()
ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
-
- c, err := New(sb.Context(), sb.Address())
- require.NoError(t, err)
- defer c.Close()
-
- // push the busybox image to the mutable registry
- sourceImage := "busybox:latest"
- def, err := llb.Image(sourceImage).Marshal(sb.Context())
+ // check image in containerd
+ _, err = client.ImageService().Get(ctx, target)
require.NoError(t, err)
- targetNoTag := registry + "/buildkit/testlazyimage:"
- target := targetNoTag + "latest"
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- },
- },
- },
- }, nil)
+ // deleting image should release all content
+ err = client.ImageService().Delete(ctx, target, images.SynchronousDelete())
require.NoError(t, err)
- imageService := client.ImageService()
- contentStore := client.ContentStore()
+ checkAllReleasable(t, c, sb, true)
- img, err := imageService.Get(ctx, target)
+ img, err := client.Pull(ctx, target)
require.NoError(t, err)
- manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
+ desc, err := img.Config(ctx)
require.NoError(t, err)
- for _, layer := range manifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.NoError(t, err)
- }
-
- // clear all local state out
- err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), desc)
require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
- // retag the image we just pushed with no actual changes, which
- // should not result in the image getting un-lazied
- def, err = llb.Image(target).Marshal(sb.Context())
+ var ociimg ocispecs.Image
+ err = json.Unmarshal(dt, &ociimg)
require.NoError(t, err)
- target2 := targetNoTag + "newtag"
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target2,
- "push": "true",
- },
- },
- },
- }, nil)
- require.NoError(t, err)
+ require.NotEqual(t, "", ociimg.OS)
+ require.NotEqual(t, "", ociimg.Architecture)
+ require.NotEqual(t, "", ociimg.Config.WorkingDir)
+ require.Equal(t, "layers", ociimg.RootFS.Type)
+ require.Equal(t, 3, len(ociimg.RootFS.DiffIDs))
+ require.NotNil(t, ociimg.Created)
+ require.True(t, time.Since(*ociimg.Created) < 2*time.Minute)
+ require.Condition(t, func() bool {
+ for _, env := range ociimg.Config.Env {
+ if strings.HasPrefix(env, "PATH=") {
+ return true
+ }
+ }
+ return false
+ })
- img, err = imageService.Get(ctx, target2)
+ require.Equal(t, 3, len(ociimg.History))
+ require.Contains(t, ociimg.History[0].CreatedBy, "foo/sub/bar")
+ require.Contains(t, ociimg.History[1].CreatedBy, "true")
+ require.Contains(t, ociimg.History[2].CreatedBy, "foo/sub/baz")
+ require.False(t, ociimg.History[0].EmptyLayer)
+ require.False(t, ociimg.History[1].EmptyLayer)
+ require.False(t, ociimg.History[2].EmptyLayer)
+
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), img.Target())
require.NoError(t, err)
- manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
+ var mfst = struct {
+ MediaType string `json:"mediaType,omitempty"`
+ ocispecs.Manifest
+ }{}
+
+ err = json.Unmarshal(dt, &mfst)
require.NoError(t, err)
- for _, layer := range manifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
- }
+ require.Equal(t, images.MediaTypeDockerSchema2Manifest, mfst.MediaType)
+ require.Equal(t, 3, len(mfst.Layers))
+ require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[0].MediaType)
+ require.Equal(t, images.MediaTypeDockerSchema2LayerGzip, mfst.Layers[1].MediaType)
- // clear all local state out again
- err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[0].Digest})
require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
- // try a cross-repo push to same registry, which should still result in the
- // image remaining lazy
- target3 := registry + "/buildkit/testlazycrossrepo:latest"
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target3,
- "push": "true",
- },
- },
- },
- }, nil)
+ m, err := testutil.ReadTarToMap(dt, true)
require.NoError(t, err)
- img, err = imageService.Get(ctx, target3)
+ item, ok := m["foo/"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
+ require.Equal(t, 0741, int(item.Header.Mode&0777))
+
+ item, ok = m["foo/sub/"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
+
+ item, ok = m["foo/sub/bar"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+ require.Equal(t, []byte("first"), item.Data)
+
+ _, ok = m["foo/sub/baz"]
+ require.False(t, ok)
+
+ dt, err = content.ReadBlob(ctx, img.ContentStore(), ocispecs.Descriptor{Digest: mfst.Layers[2].Digest})
require.NoError(t, err)
- manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
+ m, err = testutil.ReadTarToMap(dt, true)
require.NoError(t, err)
- for _, layer := range manifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ item, ok = m["foo/sub/baz"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeReg)
+ require.Equal(t, []byte("second"), item.Data)
+
+ item, ok = m["foo/"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
+ require.Equal(t, 0741, int(item.Header.Mode&0777))
+
+ item, ok = m["foo/sub/"]
+ require.True(t, ok)
+ require.Equal(t, int32(item.Header.Typeflag), tar.TypeDir)
+
+ _, ok = m["foo/sub/bar"]
+ require.False(t, ok)
+}
+
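+// testStargzLazyRegistryCacheImportExport verifies that layers restored from an
+// estargz registry cache stay lazy in the stargz snapshotter (their blobs are
+// not pulled into the content store) and only get un-lazied when they are
+// exported.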
+func testStargzLazyRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
+ requiresLinux(t)
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" || sb.Snapshotter() != "stargz" {
+ t.Skip("test requires containerd worker with stargz snapshotter")
}
- // check that a subsequent build can use the previously lazy image in an exec
- def, err = llb.Image(target2).Run(llb.Args([]string{"true"})).Marshal(sb.Context())
+ client, err := newContainerd(cdAddress)
require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ defer client.Close()
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
-}
-func testZstdLocalCacheExport(t *testing.T, sb integration.Sandbox) {
+ var (
+ imageService = client.ImageService()
+ contentStore = client.ContentStore()
+ ctx = namespaces.WithNamespace(sb.Context(), "buildkit")
+ )
+
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- busybox := llb.Image("busybox:latest")
- cmd := `sh -e -c "echo -n zstd > data"`
-
- st := llb.Scratch()
- st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
-
- def, err := st.Marshal(sb.Context())
- require.NoError(t, err)
+ destDir := t.TempDir()
- destDir, err := ioutil.TempDir("", "buildkit")
+ // Prepare stargz registry cache
+ orgImage := "docker.io/library/alpine:latest"
+ sgzCache := registry + "/stargz/alpinecache:" + identity.NewID()
+ baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/foo"}))
+ def, err := baseDef.Marshal(sb.Context())
require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- destOutDir, err := ioutil.TempDir("", "buildkit")
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
require.NoError(t, err)
- defer os.RemoveAll(destOutDir)
-
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destOutDir,
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
},
},
- // compression option should work even with inline cache exports
CacheExports: []CacheOptionsEntry{
{
- Type: "local",
+ Type: "registry",
Attrs: map[string]string{
- "dest": destDir,
- "compression": "zstd",
+ "ref": sgzCache,
+ "compression": "estargz",
+ "oci-mediatypes": "true",
+ "force-compression": "true",
},
},
},
}, nil)
require.NoError(t, err)
- var index ocispecs.Index
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "index.json"))
+ // clear all local state out
+ ensurePruneAll(t, c, sb)
+
+	// stargz layers should remain lazy even when something is executed on top of them
+ def, err = baseDef.
+ Run(llb.Args([]string{"/bin/touch", "/bar"})).
+ Marshal(sb.Context())
require.NoError(t, err)
- err = json.Unmarshal(dt, &index)
+ target := registry + "/buildkit/testlazyimage:" + identity.NewID()
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ },
+ },
+ CacheImports: []CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": sgzCache,
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- var layerIndex ocispecs.Index
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "blobs/sha256/"+index.Manifests[0].Digest.Hex()))
+ img, err := imageService.Get(ctx, target)
require.NoError(t, err)
- err = json.Unmarshal(dt, &layerIndex)
+
+ manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
require.NoError(t, err)
- lastLayer := layerIndex.Manifests[len(layerIndex.Manifests)-2]
- require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType)
+ // Check if image layers are lazy.
+ // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer.
+ var sgzLayers []ocispecs.Descriptor
+ for i, layer := range manifest.Layers[:len(manifest.Layers)-1] {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v on layer %+v (%d)", err, layer, i)
+ sgzLayers = append(sgzLayers, layer)
+ }
+ require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull")
- zstdLayerDigest := lastLayer.Digest.Hex()
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "blobs/sha256/"+zstdLayerDigest))
+ // The topmost(last) layer created by `Run` shouldn't be lazy
+ _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest)
require.NoError(t, err)
- require.Equal(t, dt[:4], []byte{0x28, 0xb5, 0x2f, 0xfd})
-}
-func testUncompressedLocalCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- dir, err := ioutil.TempDir("", "buildkit")
+ // clear all local state out
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
require.NoError(t, err)
- defer os.RemoveAll(dir)
- im := CacheOptionsEntry{
- Type: "local",
- Attrs: map[string]string{
- "src": dir,
+ checkAllReleasable(t, c, sb, true)
+
+ // stargz layers should be exportable
+ out = filepath.Join(destDir, "out2.tar")
+ outW, err = os.Create(out)
+ require.NoError(t, err)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
+ },
},
- }
- ex := CacheOptionsEntry{
- Type: "local",
- Attrs: map[string]string{
- "dest": dir,
- "compression": "uncompressed",
- "force-compression": "true",
+ CacheImports: []CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": sgzCache,
+ },
+ },
},
+ }, nil)
+ require.NoError(t, err)
+
+ // Check if image layers are un-lazied
+ for _, layer := range sgzLayers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.NoError(t, err)
}
- testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
+
+ ensurePruneAll(t, c, sb)
}
-func testUncompressedRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
- target := registry + "/buildkit/testexport:latest"
- im := CacheOptionsEntry{
- Type: "registry",
- Attrs: map[string]string{
- "ref": target,
- },
- }
- ex := CacheOptionsEntry{
- Type: "registry",
- Attrs: map[string]string{
- "ref": target,
- "compression": "uncompressed",
- "force-compression": "true",
- },
- }
- testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
-}
-
-func testZstdLocalCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- if sb.Name() == "containerd-1.4" {
- // containerd 1.4 doesn't support zstd compression
- return
- }
- dir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(dir)
- im := CacheOptionsEntry{
- Type: "local",
- Attrs: map[string]string{
- "src": dir,
- },
- }
- ex := CacheOptionsEntry{
- Type: "local",
- Attrs: map[string]string{
- "dest": dir,
- "compression": "zstd",
- "force-compression": "true",
- "oci-mediatypes": "true", // containerd applier supports only zstd with oci-mediatype.
- },
- }
- testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
-}
-
-func testZstdRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- if sb.Name() == "containerd-1.4" {
- // containerd 1.4 doesn't support zstd compression
- return
- }
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
- target := registry + "/buildkit/testexport:latest"
- im := CacheOptionsEntry{
- Type: "registry",
- Attrs: map[string]string{
- "ref": target,
- },
- }
- ex := CacheOptionsEntry{
- Type: "registry",
- Attrs: map[string]string{
- "ref": target,
- "compression": "zstd",
- "force-compression": "true",
- "oci-mediatypes": "true", // containerd applier supports only zstd with oci-mediatype.
- },
- }
- testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
-}
-
-func testBasicCacheImportExport(t *testing.T, sb integration.Sandbox, cacheOptionsEntryImport, cacheOptionsEntryExport []CacheOptionsEntry) {
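+
+// testStargzLazyInlineCacheImportExport is the inline-cache variant: the pushed
+// estargz image itself carries the cache metadata, and its layers are expected
+// to stay lazy until they are exported.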
+func testStargzLazyInlineCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
- require.NoError(t, err)
- defer c.Close()
-
- busybox := llb.Image("busybox:latest")
- st := llb.Scratch()
-
- run := func(cmd string) {
- st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" || sb.Snapshotter() != "stargz" {
+ t.Skip("test requires containerd worker with stargz snapshotter")
}
- run(`sh -c "echo -n foobar > const"`)
- run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`)
-
- def, err := st.Marshal(sb.Context())
- require.NoError(t, err)
-
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- },
- },
- CacheExports: cacheOptionsEntryExport,
- }, nil)
- require.NoError(t, err)
-
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "const"))
- require.NoError(t, err)
- require.Equal(t, string(dt), "foobar")
-
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "unique"))
- require.NoError(t, err)
-
- ensurePruneAll(t, c, sb)
-
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- }},
- CacheImports: cacheOptionsEntryImport,
- }, nil)
- require.NoError(t, err)
-
- dt2, err := ioutil.ReadFile(filepath.Join(destDir, "const"))
- require.NoError(t, err)
- require.Equal(t, string(dt2), "foobar")
-
- dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique"))
- require.NoError(t, err)
- require.Equal(t, string(dt), string(dt2))
-}
-
-func testBasicRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
+ client, err := newContainerd(cdAddress)
require.NoError(t, err)
- target := registry + "/buildkit/testexport:latest"
- o := CacheOptionsEntry{
- Type: "registry",
- Attrs: map[string]string{
- "ref": target,
- },
- }
- testBasicCacheImportExport(t, sb, []CacheOptionsEntry{o}, []CacheOptionsEntry{o})
-}
-
-func testMultipleRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ defer client.Close()
registry, err := sb.NewRegistry()
if errors.Is(err, integration.ErrRequirements) {
t.Skip(err.Error())
}
require.NoError(t, err)
- target := registry + "/buildkit/testexport:latest"
- o := CacheOptionsEntry{
- Type: "registry",
- Attrs: map[string]string{
- "ref": target,
- },
- }
- o2 := CacheOptionsEntry{
- Type: "registry",
- Attrs: map[string]string{
- "ref": target + "notexist",
- },
- }
- testBasicCacheImportExport(t, sb, []CacheOptionsEntry{o, o2}, []CacheOptionsEntry{o})
-}
-
-func testBasicLocalCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- dir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(dir)
- im := CacheOptionsEntry{
- Type: "local",
- Attrs: map[string]string{
- "src": dir,
- },
- }
- ex := CacheOptionsEntry{
- Type: "local",
- Attrs: map[string]string{
- "dest": dir,
- },
- }
- testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
-}
-func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- requiresLinux(t)
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
+ var (
+ imageService = client.ImageService()
+ contentStore = client.ContentStore()
+ ctx = namespaces.WithNamespace(sb.Context(), "buildkit")
+ )
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- busybox := llb.Image("busybox:latest")
- st := llb.Scratch()
-
- run := func(cmd string) {
- st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
- }
-
- run(`sh -c "echo -n foobar > const"`)
- run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`)
-
- def, err := st.Marshal(sb.Context())
+ // Prepare stargz inline cache
+ orgImage := "docker.io/library/alpine:latest"
+ sgzImage := registry + "/stargz/alpine:" + identity.NewID()
+ baseDef := llb.Image(orgImage).Run(llb.Args([]string{"/bin/touch", "/foo"}))
+ def, err := baseDef.Marshal(sb.Context())
require.NoError(t, err)
-
- target := registry + "/buildkit/testexportinline:latest"
-
- resp, err := c.Solve(sb.Context(), def, SolveOpt{
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterImage,
Attrs: map[string]string{
- "name": target,
- "push": "true",
+ "name": sgzImage,
+ "push": "true",
+ "compression": "estargz",
+ "oci-mediatypes": "true",
+ "force-compression": "true",
},
},
},
@@ -3798,23 +3948,28 @@ func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- dgst, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
- require.Equal(t, ok, true)
-
- unique, err := readFileInImage(sb.Context(), c, target+"@"+dgst, "/unique")
+ // clear all local state out
+ err = imageService.Delete(ctx, sgzImage, images.SynchronousDelete())
require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
- ensurePruneAll(t, c, sb)
-
- resp, err = c.Solve(sb.Context(), def, SolveOpt{
- // specifying inline cache exporter is needed for reproducing containerimage.digest
- // (not needed for reproducing rootfs/unique)
+	// stargz layers should remain lazy even when something is executed on top of them
+ def, err = baseDef.
+ Run(llb.Args([]string{"/bin/touch", "/bar"})).
+ Marshal(sb.Context())
+ require.NoError(t, err)
+ target := registry + "/buildkit/testlazyimage:" + identity.NewID()
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterImage,
Attrs: map[string]string{
- "name": target,
- "push": "true",
+ "name": target,
+ "push": "true",
+ "oci-mediatypes": "true",
+ "compression": "estargz",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
},
},
},
@@ -3827,514 +3982,720 @@ func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) {
{
Type: "registry",
Attrs: map[string]string{
- "ref": target,
+ "ref": sgzImage,
},
},
},
}, nil)
require.NoError(t, err)
- dgst2, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
- require.Equal(t, ok, true)
-
- require.Equal(t, dgst, dgst2)
+ img, err := imageService.Get(ctx, target)
+ require.NoError(t, err)
- ensurePruneAll(t, c, sb)
+ manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
+ require.NoError(t, err)
- // Export the cache again with compression
- resp, err = c.Solve(sb.Context(), def, SolveOpt{
- // specifying inline cache exporter is needed for reproducing containerimage.digest
- // (not needed for reproducing rootfs/unique)
+ // Check if image layers are lazy.
+ // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer.
+ var sgzLayers []ocispecs.Descriptor
+ for i, layer := range manifest.Layers[:len(manifest.Layers)-1] {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v on layer %+v (%d)", err, layer, i)
+ sgzLayers = append(sgzLayers, layer)
+ }
+ require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull")
+
+ // The topmost(last) layer created by `Run` shouldn't be lazy
+ _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest)
+ require.NoError(t, err)
+
+ // clear all local state out
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
+
+ // stargz layers should be exportable
+ destDir := t.TempDir()
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- "compression": "uncompressed", // inline cache should work with compression
- "force-compression": "true",
- },
- },
- },
- CacheExports: []CacheOptionsEntry{
- {
- Type: "inline",
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
},
},
CacheImports: []CacheOptionsEntry{
{
Type: "registry",
Attrs: map[string]string{
- "ref": target,
+ "ref": sgzImage,
},
},
},
}, nil)
require.NoError(t, err)
- dgst2uncompress, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
- require.Equal(t, ok, true)
+ // Check if image layers are un-lazied
+ for _, layer := range sgzLayers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.NoError(t, err)
+ }
- // dgst2uncompress != dgst, because the compression type is different
- unique2uncompress, err := readFileInImage(sb.Context(), c, target+"@"+dgst2uncompress, "/unique")
+ ensurePruneAll(t, c, sb)
+}
+
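+// testStargzLazyPull verifies that an estargz image is pulled lazily by the
+// stargz snapshotter: its base layers stay out of the content store until the
+// image is exported.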
+func testStargzLazyPull(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" || sb.Snapshotter() != "stargz" {
+ t.Skip("test requires containerd worker with stargz snapshotter")
+ }
+
+ client, err := newContainerd(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- require.EqualValues(t, unique, unique2uncompress)
- ensurePruneAll(t, c, sb)
+ var (
+ imageService = client.ImageService()
+ contentStore = client.ContentStore()
+ ctx = namespaces.WithNamespace(sb.Context(), "buildkit")
+ )
- resp, err = c.Solve(sb.Context(), def, SolveOpt{
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ // Prepare stargz image
+ orgImage := "docker.io/library/alpine:latest"
+ sgzImage := registry + "/stargz/alpine:" + identity.NewID()
+ def, err := llb.Image(orgImage).Marshal(sb.Context())
+ require.NoError(t, err)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterImage,
Attrs: map[string]string{
- "name": target,
- "push": "true",
+ "name": sgzImage,
+ "push": "true",
+ "compression": "estargz",
+ "oci-mediatypes": "true",
+ "force-compression": "true",
},
},
},
- CacheImports: []CacheOptionsEntry{
+ }, nil)
+ require.NoError(t, err)
+
+ // clear all local state out
+ err = imageService.Delete(ctx, sgzImage, images.SynchronousDelete())
+ require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
+
+	// stargz layers should remain lazy even when something is executed on top of them
+ def, err = llb.Image(sgzImage).
+ Run(llb.Args([]string{"/bin/touch", "/foo"})).
+ Marshal(sb.Context())
+ require.NoError(t, err)
+ target := registry + "/buildkit/testlazyimage:" + identity.NewID()
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
{
- Type: "registry",
+ Type: ExporterImage,
Attrs: map[string]string{
- "ref": target,
+ "name": target,
+ "push": "true",
+ "oci-mediatypes": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
},
},
},
}, nil)
require.NoError(t, err)
- dgst3, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
- require.Equal(t, ok, true)
+ img, err := imageService.Get(ctx, target)
+ require.NoError(t, err)
- // dgst3 != dgst, because inline cache is not exported for dgst3
- unique3, err := readFileInImage(sb.Context(), c, target+"@"+dgst3, "/unique")
+ manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
require.NoError(t, err)
- require.EqualValues(t, unique, unique3)
-}
-func readFileInImage(ctx context.Context, c *Client, ref, path string) ([]byte, error) {
- def, err := llb.Image(ref).Marshal(ctx)
- if err != nil {
- return nil, err
- }
- destDir, err := ioutil.TempDir("", "buildkit")
- if err != nil {
- return nil, err
+ // Check if image layers are lazy.
+ // The topmost(last) layer created by `Run` isn't lazy so we skip the check for the layer.
+ var sgzLayers []ocispecs.Descriptor
+ for _, layer := range manifest.Layers[:len(manifest.Layers)-1] {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ sgzLayers = append(sgzLayers, layer)
}
- defer os.RemoveAll(destDir)
+ require.NotEqual(t, 0, len(sgzLayers), "no layer can be used for checking lazypull")
- _, err = c.Solve(ctx, def, SolveOpt{
+ // The topmost(last) layer created by `Run` shouldn't be lazy
+ _, err = contentStore.Info(ctx, manifest.Layers[len(manifest.Layers)-1].Digest)
+ require.NoError(t, err)
+
+ // clear all local state out
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
+
+ // stargz layers should be exportable
+ destDir := t.TempDir()
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDir,
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
},
},
}, nil)
- if err != nil {
- return nil, err
+ require.NoError(t, err)
+
+ // Check if image layers are un-lazied
+ for _, layer := range sgzLayers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.NoError(t, err)
}
- return ioutil.ReadFile(filepath.Join(destDir, filepath.Clean(path)))
+
+ ensurePruneAll(t, c, sb)
}
-func testCachedMounts(t *testing.T, sb integration.Sandbox) {
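+
+// testLazyImagePush verifies that retagging and cross-repo pushing an image
+// whose layer blobs are not present locally does not force those layers to be
+// pulled, and that such a lazy image can still be used by a later build.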
+func testLazyImagePush(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
requiresLinux(t)
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" {
+ t.Skip("test requires containerd worker")
+ }
+
+ client, err := newContainerd(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- busybox := llb.Image("busybox:latest")
- // setup base for one of the cache sources
- st := busybox.Run(llb.Shlex(`sh -c "echo -n base > baz"`), llb.Dir("/wd"))
- base := st.AddMount("/wd", llb.Scratch())
-
- st = busybox.Run(llb.Shlex(`sh -c "echo -n first > foo"`), llb.Dir("/wd"))
- st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
- st = st.Run(llb.Shlex(`sh -c "cat foo && echo -n second > /wd2/bar"`), llb.Dir("/wd"))
- st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
- st.AddMount("/wd2", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared))
+ // push the busybox image to the mutable registry
+ sourceImage := "busybox:latest"
+ def, err := llb.Image(sourceImage).Marshal(sb.Context())
+ require.NoError(t, err)
- def, err := st.Marshal(sb.Context())
+ targetNoTag := registry + "/buildkit/testlazyimage:"
+ target := targetNoTag + "latest"
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ imageService := client.ImageService()
+ contentStore := client.ContentStore()
+
+ img, err := imageService.Get(ctx, target)
require.NoError(t, err)
- // repeat to make sure cache works
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
require.NoError(t, err)
- // second build using cache directories
- st = busybox.Run(llb.Shlex(`sh -c "cp /src0/foo . && cp /src1/bar . && cp /src1/baz ."`), llb.Dir("/wd"))
- out := st.AddMount("/wd", llb.Scratch())
- st.AddMount("/src0", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
- st.AddMount("/src1", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared))
+ for _, layer := range manifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.NoError(t, err)
+ }
- destDir, err := ioutil.TempDir("", "buildkit")
+ // clear all local state out
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ checkAllReleasable(t, c, sb, true)
- def, err = out.Marshal(sb.Context())
+ // retag the image we just pushed with no actual changes, which
+ // should not result in the image getting un-lazied
+ def, err = llb.Image(target).Marshal(sb.Context())
require.NoError(t, err)
+ target2 := targetNoTag + "newtag"
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDir,
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target2,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
},
},
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ img, err = imageService.Get(ctx, target2)
require.NoError(t, err)
- require.Equal(t, string(dt), "first")
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar"))
+ manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
require.NoError(t, err)
- require.Equal(t, string(dt), "second")
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "baz"))
- require.NoError(t, err)
- require.Equal(t, string(dt), "base")
+ for _, layer := range manifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ }
+ // clear all local state out again
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ require.NoError(t, err)
checkAllReleasable(t, c, sb, true)
-}
-func testSharedCacheMounts(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ // try a cross-repo push to same registry, which should still result in the
+ // image remaining lazy
+ target3 := registry + "/buildkit/testlazycrossrepo:latest"
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target3,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- defer c.Close()
- busybox := llb.Image("busybox:latest")
- st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd"))
- st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
+ img, err = imageService.Get(ctx, target3)
+ require.NoError(t, err)
- st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! -f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd"))
- st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
+ manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
+ require.NoError(t, err)
- out := busybox.Run(llb.Shlex("true"))
- out.AddMount("/m1", st.Root())
- out.AddMount("/m2", st2.Root())
+ for _, layer := range manifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ }
- def, err := out.Marshal(sb.Context())
+ // check that a subsequent build can use the previously lazy image in an exec
+ def, err = llb.Image(target2).Run(llb.Args([]string{"true"})).Marshal(sb.Context())
require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
require.NoError(t, err)
}
-// #2334
-func testSharedCacheMountsNoScratch(t *testing.T, sb integration.Sandbox) {
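+
+// testZstdLocalCacheExport checks that the local cache exporter honors the zstd
+// compression option, by inspecting the exported layer media type and the zstd
+// magic bytes at the start of the blob.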
- requiresLinux(t)
+func testZstdLocalCacheExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
busybox := llb.Image("busybox:latest")
- st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd"))
- st.AddMount("/wd", llb.Image("busybox:latest"), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
-
- st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! -f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd"))
- st2.AddMount("/wd", llb.Image("busybox:latest"), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
+ cmd := `sh -e -c "echo -n zstd > data"`
- out := busybox.Run(llb.Shlex("true"))
- out.AddMount("/m1", st.Root())
- out.AddMount("/m2", st2.Root())
+ st := llb.Scratch()
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
- def, err := out.Marshal(sb.Context())
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
-}
+ destDir := t.TempDir()
+ destOutDir := t.TempDir()
-func testLockedCacheMounts(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destOutDir,
+ },
+ },
+		// compression option should work even with local cache exports
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": destDir,
+ "compression": "zstd",
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- defer c.Close()
-
- busybox := llb.Image("busybox:latest")
- st := busybox.Run(llb.Shlex(`sh -e -c "touch one; if [[ -f two ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f two ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd"))
- st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
-
- st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; if [[ -f one ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f one ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd"))
- st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
-
- out := busybox.Run(llb.Shlex("true"))
- out.AddMount("/m1", st.Root())
- out.AddMount("/m2", st2.Root())
- def, err := out.Marshal(sb.Context())
+ var index ocispecs.Index
+ dt, err := os.ReadFile(filepath.Join(destDir, "index.json"))
require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ err = json.Unmarshal(dt, &index)
require.NoError(t, err)
-}
-func testDuplicateCacheMount(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ var layerIndex ocispecs.Index
+ dt, err = os.ReadFile(filepath.Join(destDir, "blobs/sha256/"+index.Manifests[0].Digest.Hex()))
require.NoError(t, err)
- defer c.Close()
-
- busybox := llb.Image("busybox:latest")
-
- out := busybox.Run(llb.Shlex(`sh -e -c "[[ ! -f /m2/foo ]]; touch /m1/foo; [[ -f /m2/foo ]];"`))
- out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
- out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
-
- def, err := out.Marshal(sb.Context())
+ err = json.Unmarshal(dt, &layerIndex)
require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ lastLayer := layerIndex.Manifests[len(layerIndex.Manifests)-2]
+ require.Equal(t, ocispecs.MediaTypeImageLayer+"+zstd", lastLayer.MediaType)
+
+ zstdLayerDigest := lastLayer.Digest.Hex()
+ dt, err = os.ReadFile(filepath.Join(destDir, "blobs/sha256/"+zstdLayerDigest))
require.NoError(t, err)
+ require.Equal(t, dt[:4], []byte{0x28, 0xb5, 0x2f, 0xfd})
}
-func testRunCacheWithMounts(t *testing.T, sb integration.Sandbox) {
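+
+// testCacheExportIgnoreError points the local, registry and s3 cache exporters
+// at invalid destinations and checks that the ignore-error attribute decides
+// whether the failed cache export fails the whole solve.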
- requiresLinux(t)
+func testCacheExportIgnoreError(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
busybox := llb.Image("busybox:latest")
+ cmd := `sh -e -c "echo -n ignore-error > data"`
- out := busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/sbin/apk ]]"`))
- out.AddMount("/m1", llb.Image("alpine:latest"), llb.Readonly)
+ st := llb.Scratch()
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
- def, err := out.Marshal(sb.Context())
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
+ tests := map[string]struct {
+ Exports []ExportEntry
+ CacheExports []CacheOptionsEntry
+ expectedErrors []string
+ }{
+ "local-ignore-error": {
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: t.TempDir(),
+ },
+ },
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": "éèç",
+ },
+ },
+ },
+ expectedErrors: []string{"failed to solve", "contains value with non-printable ASCII characters"},
+ },
+ "registry-ignore-error": {
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": "test-registry-ignore-error",
+ "push": "false",
+ },
+ },
+ },
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": "fake-url:5000/myrepo:buildcache",
+ },
+ },
+ },
+ expectedErrors: []string{"failed to solve", "dial tcp: lookup fake-url", "no such host"},
+ },
+ "s3-ignore-error": {
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: t.TempDir(),
+ },
+ },
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "s3",
+ Attrs: map[string]string{
+ "endpoint_url": "http://fake-url:9000",
+ "bucket": "my-bucket",
+ "region": "us-east-1",
+ "access_key_id": "minioadmin",
+ "secret_access_key": "minioadmin",
+ "use_path_style": "true",
+ },
+ },
+ },
+ expectedErrors: []string{"failed to solve", "dial tcp: lookup fake-url", "no such host"},
+ },
+ }
+ ignoreErrorValues := []bool{true, false}
+ for _, ignoreError := range ignoreErrorValues {
+ ignoreErrStr := strconv.FormatBool(ignoreError)
+ for n, test := range tests {
+ require.Equal(t, 1, len(test.Exports))
+ require.Equal(t, 1, len(test.CacheExports))
+ require.NotEmpty(t, test.CacheExports[0].Attrs)
+ test.CacheExports[0].Attrs["ignore-error"] = ignoreErrStr
+ testName := fmt.Sprintf("%s-%s", n, ignoreErrStr)
+ t.Run(testName, func(t *testing.T) {
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: test.Exports,
+ CacheExports: test.CacheExports,
+ }, nil)
+ if ignoreError {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err)
+ for _, errStr := range test.expectedErrors {
+ require.Contains(t, err.Error(), errStr)
+ }
+ }
+ })
+ }
+ }
+}
- out = busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/sbin/apk ]]"`))
- out.AddMount("/m1", llb.Image("busybox:latest"), llb.Readonly)
+func testUncompressedLocalCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
+ dir := t.TempDir()
+ im := CacheOptionsEntry{
+ Type: "local",
+ Attrs: map[string]string{
+ "src": dir,
+ },
+ }
+ ex := CacheOptionsEntry{
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": dir,
+ "compression": "uncompressed",
+ "force-compression": "true",
+ },
+ }
+ testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
+}
- def, err = out.Marshal(sb.Context())
+func testUncompressedRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
+ target := registry + "/buildkit/testexport:latest"
+ im := CacheOptionsEntry{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ },
+ }
+ ex := CacheOptionsEntry{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ "compression": "uncompressed",
+ "force-compression": "true",
+ },
+ }
+ testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
+}
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.Error(t, err)
+func testZstdLocalCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
+ dir := t.TempDir()
+ im := CacheOptionsEntry{
+ Type: "local",
+ Attrs: map[string]string{
+ "src": dir,
+ },
+ }
+ ex := CacheOptionsEntry{
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": dir,
+ "compression": "zstd",
+ "force-compression": "true",
+ "oci-mediatypes": "true", // containerd applier supports only zstd with oci-mediatype.
+ },
+ }
+ testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
}
-func testCacheMountNoCache(t *testing.T, sb integration.Sandbox) {
+func testZstdRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+ target := registry + "/buildkit/testexport:latest"
+ im := CacheOptionsEntry{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ },
+ }
+ ex := CacheOptionsEntry{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ "compression": "zstd",
+ "force-compression": "true",
+ "oci-mediatypes": "true", // containerd applier supports only zstd with oci-mediatype.
+ },
+ }
+ testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
+}
+
+func testBasicCacheImportExport(t *testing.T, sb integration.Sandbox, cacheOptionsEntryImport, cacheOptionsEntryExport []CacheOptionsEntry) {
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
busybox := llb.Image("busybox:latest")
-
- out := busybox.Run(llb.Shlex(`sh -e -c "touch /m1/foo; touch /m2/bar"`))
- out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
- out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache2", llb.CacheMountLocked))
-
- def, err := out.Marshal(sb.Context())
- require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
-
- out = busybox.Run(llb.Shlex(`sh -e -c "[[ ! -f /m1/foo ]]; touch /m1/foo2;"`), llb.IgnoreCache)
- out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
-
- def, err = out.Marshal(sb.Context())
- require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
-
- out = busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/foo2 ]]; [[ -f /m2/bar ]];"`))
- out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
- out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache2", llb.CacheMountLocked))
-
- def, err = out.Marshal(sb.Context())
- require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
-}
-
-func testCopyFromEmptyImage(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
- require.NoError(t, err)
- defer c.Close()
-
- for _, image := range []llb.State{llb.Scratch(), llb.Image("tonistiigi/test:nolayers")} {
- st := llb.Scratch().File(llb.Copy(image, "/", "/"))
- def, err := st.Marshal(sb.Context())
- require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
-
- st = llb.Scratch().File(llb.Copy(image, "/foo", "/"))
- def, err = st.Marshal(sb.Context())
- require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.Error(t, err)
- require.Contains(t, err.Error(), "/foo: no such file or directory")
-
- busybox := llb.Image("busybox:latest")
-
- out := busybox.Run(llb.Shlex(`sh -e -c '[ $(ls /scratch | wc -l) = '0' ]'`))
- out.AddMount("/scratch", image, llb.Readonly)
-
- def, err = out.Marshal(sb.Context())
- require.NoError(t, err)
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.NoError(t, err)
- }
-}
-
-// containerd/containerd#2119
-func testDuplicateWhiteouts(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
- require.NoError(t, err)
- defer c.Close()
-
- busybox := llb.Image("busybox:latest")
- st := llb.Scratch()
+ st := llb.Scratch()
run := func(cmd string) {
st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
}
- run(`sh -e -c "mkdir -p d0 d1; echo -n first > d1/bar;"`)
- run(`sh -c "rm -rf d0 d1"`)
+ run(`sh -c "echo -n foobar > const"`)
+ run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`)
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterOCI,
- Output: fixedWriteCloser(outW),
+ Type: ExporterLocal,
+ OutputDir: destDir,
},
},
+ CacheExports: cacheOptionsEntryExport,
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(out)
- require.NoError(t, err)
-
- m, err := testutil.ReadTarToMap(dt, false)
- require.NoError(t, err)
-
- var index ocispecs.Index
- err = json.Unmarshal(m["index.json"].Data, &index)
- require.NoError(t, err)
-
- var mfst ocispecs.Manifest
- err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
- require.NoError(t, err)
-
- lastLayer := mfst.Layers[len(mfst.Layers)-1]
-
- layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()]
- require.True(t, ok)
-
- m, err = testutil.ReadTarToMap(layer.Data, true)
+ dt, err := os.ReadFile(filepath.Join(destDir, "const"))
require.NoError(t, err)
+ require.Equal(t, string(dt), "foobar")
- _, ok = m[".wh.d0"]
- require.True(t, ok)
-
- _, ok = m[".wh.d1"]
- require.True(t, ok)
-
- // check for a bug that added whiteout for subfile
- _, ok = m["d1/.wh.bar"]
- require.True(t, !ok)
-}
-
-// #276
-func testWhiteoutParentDir(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ dt, err = os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
- defer c.Close()
-
- busybox := llb.Image("busybox:latest")
- st := llb.Scratch()
-
- run := func(cmd string) {
- st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
- }
-
- run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`)
- run(`rm foo/bar`)
- def, err := st.Marshal(sb.Context())
- require.NoError(t, err)
+ ensurePruneAll(t, c, sb)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterOCI,
- Output: fixedWriteCloser(outW),
- },
- },
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ }},
+ CacheImports: cacheOptionsEntryImport,
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(out)
+ dt2, err := os.ReadFile(filepath.Join(destDir, "const"))
require.NoError(t, err)
+ require.Equal(t, string(dt2), "foobar")
- m, err := testutil.ReadTarToMap(dt, false)
+ dt2, err = os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
+ require.Equal(t, string(dt), string(dt2))
+}
- var index ocispecs.Index
- err = json.Unmarshal(m["index.json"].Data, &index)
+func testBasicRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
+ target := registry + "/buildkit/testexport:latest"
+ o := CacheOptionsEntry{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ },
+ }
+ testBasicCacheImportExport(t, sb, []CacheOptionsEntry{o}, []CacheOptionsEntry{o})
+}
- var mfst ocispecs.Manifest
- err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
+func testMultipleRegistryCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
+ target := registry + "/buildkit/testexport:latest"
+ o := CacheOptionsEntry{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ },
+ }
+ o2 := CacheOptionsEntry{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target + "notexist",
+ },
+ }
+ testBasicCacheImportExport(t, sb, []CacheOptionsEntry{o, o2}, []CacheOptionsEntry{o})
+}
- lastLayer := mfst.Layers[len(mfst.Layers)-1]
-
- layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()]
- require.True(t, ok)
+func testBasicLocalCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
+ dir := t.TempDir()
+ im := CacheOptionsEntry{
+ Type: "local",
+ Attrs: map[string]string{
+ "src": dir,
+ },
+ }
+ ex := CacheOptionsEntry{
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": dir,
+ },
+ }
+ testBasicCacheImportExport(t, sb, []CacheOptionsEntry{im}, []CacheOptionsEntry{ex})
+}
- m, err = testutil.ReadTarToMap(layer.Data, true)
+func testBasicInlineCacheImportExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureCacheImport)
+ requiresLinux(t)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- _, ok = m["foo/.wh.bar"]
- require.True(t, ok)
-
- _, ok = m["foo/"]
- require.True(t, ok)
-}
-
-// #2490
-func testMoveParentDir(t *testing.T, sb integration.Sandbox) {
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
@@ -4346,68 +4707,183 @@ func testMoveParentDir(t *testing.T, sb integration.Sandbox) {
st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
}
- run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`)
- run(`mv foo foo2`)
+ run(`sh -c "echo -n foobar > const"`)
+ run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`)
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ target := registry + "/buildkit/testexportinline:latest"
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
+ resp, err := c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterOCI,
- Output: fixedWriteCloser(outW),
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "inline",
},
},
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(out)
+ dgst, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
+ require.Equal(t, ok, true)
+
+ unique, err := readFileInImage(sb.Context(), t, c, target+"@"+dgst, "/unique")
require.NoError(t, err)
- m, err := testutil.ReadTarToMap(dt, false)
- require.NoError(t, err)
+ ensurePruneAll(t, c, sb)
- var index ocispecs.Index
- err = json.Unmarshal(m["index.json"].Data, &index)
+ resp, err = c.Solve(sb.Context(), def, SolveOpt{
+ // specifying the inline cache exporter is needed for reproducing containerimage.digest
+ // (not needed for reproducing rootfs/unique)
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "inline",
+ },
+ },
+ CacheImports: []CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- var mfst ocispecs.Manifest
- err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
+ dgst2, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
+ require.Equal(t, ok, true)
+
+ require.Equal(t, dgst, dgst2)
+
+ ensurePruneAll(t, c, sb)
+
+ // Export the cache again with compression
+ resp, err = c.Solve(sb.Context(), def, SolveOpt{
+ // specifying the inline cache exporter is needed for reproducing containerimage.digest
+ // (not needed for reproducing rootfs/unique)
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "compression": "uncompressed", // inline cache should work with compression
+ "force-compression": "true",
+ },
+ },
+ },
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "inline",
+ },
+ },
+ CacheImports: []CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- lastLayer := mfst.Layers[len(mfst.Layers)-1]
+ dgst2uncompress, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
+ require.Equal(t, ok, true)
- layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()]
- require.True(t, ok)
+ // dgst2uncompress != dgst, because the compression type is different
+ unique2uncompress, err := readFileInImage(sb.Context(), t, c, target+"@"+dgst2uncompress, "/unique")
+ require.NoError(t, err)
+ require.EqualValues(t, unique, unique2uncompress)
- m, err = testutil.ReadTarToMap(layer.Data, true)
+ ensurePruneAll(t, c, sb)
+
+ resp, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ CacheImports: []CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- _, ok = m[".wh.foo"]
- require.True(t, ok)
+ dgst3, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
+ require.Equal(t, ok, true)
- _, ok = m["foo2/"]
- require.True(t, ok)
+ // dgst3 != dgst, because inline cache is not exported for dgst3
+ unique3, err := readFileInImage(sb.Context(), t, c, target+"@"+dgst3, "/unique")
+ require.NoError(t, err)
+ require.EqualValues(t, unique, unique3)
+}
- _, ok = m["foo2/bar"]
- require.True(t, ok)
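+// readFileInImage solves llb.Image(ref) with the local exporter into a temp
+// directory and returns the contents of path from the exported rootfs.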
+func readFileInImage(ctx context.Context, t *testing.T, c *Client, ref, path string) ([]byte, error) {
+ def, err := llb.Image(ref).Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ destDir := t.TempDir()
+
+ _, err = c.Solve(ctx, def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ if err != nil {
+ return nil, err
+ }
+ return os.ReadFile(filepath.Join(destDir, filepath.Clean(path)))
}
-// #296
-func testSchema1Image(t *testing.T, sb integration.Sandbox) {
+func testCachedMounts(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- st := llb.Image("gcr.io/google_containers/pause:3.0@sha256:0d093c962a6c2dd8bb8727b661e2b5f13e9df884af9945b4cc7088d9350cd3ee")
+ busybox := llb.Image("busybox:latest")
+ // setup base for one of the cache sources
+ st := busybox.Run(llb.Shlex(`sh -c "echo -n base > baz"`), llb.Dir("/wd"))
+ base := st.AddMount("/wd", llb.Scratch())
+
+ st = busybox.Run(llb.Shlex(`sh -c "echo -n first > foo"`), llb.Dir("/wd"))
+ st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
+ st = st.Run(llb.Shlex(`sh -c "cat foo && echo -n second > /wd2/bar"`), llb.Dir("/wd"))
+ st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
+ st.AddMount("/wd2", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared))
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
@@ -4415,1122 +4891,3571 @@ func testSchema1Image(t *testing.T, sb integration.Sandbox) {
_, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
-}
-
-// #319
-func testMountWithNoSource(t *testing.T, sb integration.Sandbox) {
- c, err := New(sb.Context(), sb.Address())
+ // repeat to make sure cache works
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
require.NoError(t, err)
- defer c.Close()
- busybox := llb.Image("docker.io/library/busybox:latest")
- st := llb.Scratch()
+ // second build using cache directories
+ st = busybox.Run(llb.Shlex(`sh -c "cp /src0/foo . && cp /src1/bar . && cp /src1/baz ."`), llb.Dir("/wd"))
+ out := st.AddMount("/wd", llb.Scratch())
+ st.AddMount("/src0", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
+ st.AddMount("/src1", base, llb.AsPersistentCacheDir("mycache2", llb.CacheMountShared))
- var nilState llb.State
+ destDir := t.TempDir()
- // This should never actually be run, but we want to succeed
- // if it was, because we expect an error below, or a daemon
- // panic if the issue has regressed.
- run := busybox.Run(
- llb.Args([]string{"/bin/true"}),
- llb.AddMount("/nil", nilState, llb.SourcePath("/"), llb.Readonly))
+ def, err = out.Marshal(sb.Context())
+ require.NoError(t, err)
- st = run.AddMount("/mnt", st)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
- def, err := st.Marshal(sb.Context())
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
+ require.Equal(t, string(dt), "first")
- _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ dt, err = os.ReadFile(filepath.Join(destDir, "bar"))
require.NoError(t, err)
+ require.Equal(t, string(dt), "second")
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "baz"))
+ require.NoError(t, err)
+ require.Equal(t, string(dt), "base")
checkAllReleasable(t, c, sb, true)
}
-// #324
-func testReadonlyRootFS(t *testing.T, sb integration.Sandbox) {
+func testSharedCacheMounts(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- busybox := llb.Image("docker.io/library/busybox:latest")
- st := llb.Scratch()
+ busybox := llb.Image("busybox:latest")
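+ // both runs wait for a file created by the other inside the same shared cache
+ // mount, so the build can only complete when they see the same cache directory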
+ st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd"))
+ st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
- // The path /foo should be unwriteable.
- run := busybox.Run(
- llb.ReadonlyRootFS(),
- llb.Args([]string{"/bin/touch", "/foo"}))
- st = run.AddMount("/mnt", st)
+ st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! -f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd"))
+ st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
- def, err := st.Marshal(sb.Context())
+ out := busybox.Run(llb.Shlex("true"))
+ out.AddMount("/m1", st.Root())
+ out.AddMount("/m2", st2.Root())
+
+ def, err := out.Marshal(sb.Context())
require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.Error(t, err)
- // Would prefer to detect more specifically "Read-only file
- // system" but that isn't exposed here (it is on the stdio
- // which we don't see).
- require.Contains(t, err.Error(), "process \"/bin/touch /foo\" did not complete successfully")
-
- checkAllReleasable(t, c, sb, true)
+ require.NoError(t, err)
}
-func testSourceMap(t *testing.T, sb integration.Sandbox) {
+// #2334
+func testSharedCacheMountsNoScratch(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- sm1 := llb.NewSourceMap(nil, "foo", []byte("data1"))
- sm2 := llb.NewSourceMap(nil, "bar", []byte("data2"))
+ busybox := llb.Image("busybox:latest")
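+ // same as testSharedCacheMounts, but the cache mounts are based on an image
+ // instead of scratch (#2334)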
+ st := busybox.Run(llb.Shlex(`sh -e -c "touch one; while [[ ! -f two ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd"))
+ st.AddMount("/wd", llb.Image("busybox:latest"), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
- st := llb.Scratch().Run(
- llb.Shlex("not-exist"),
- sm1.Location([]*pb.Range{{Start: pb.Position{Line: 7}}}),
- sm2.Location([]*pb.Range{{Start: pb.Position{Line: 8}}}),
- sm1.Location([]*pb.Range{{Start: pb.Position{Line: 9}}}),
- )
+ st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; while [[ ! -f one ]]; do ls -l; usleep 500000; done"`), llb.Dir("/wd"))
+ st2.AddMount("/wd", llb.Image("busybox:latest"), llb.AsPersistentCacheDir("mycache1", llb.CacheMountShared))
- def, err := st.Marshal(sb.Context())
+ out := busybox.Run(llb.Shlex("true"))
+ out.AddMount("/m1", st.Root())
+ out.AddMount("/m2", st2.Root())
+
+ def, err := out.Marshal(sb.Context())
require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
- require.Error(t, err)
+ require.NoError(t, err)
+}
- srcs := errdefs.Sources(err)
- require.Equal(t, 3, len(srcs))
+func testLockedCacheMounts(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
- // Source errors are wrapped in the order provided as llb.ConstraintOpts, so
- // when they are unwrapped, the first unwrapped error is the last location
- // provided.
- require.Equal(t, "foo", srcs[0].Info.Filename)
- require.Equal(t, []byte("data1"), srcs[0].Info.Data)
- require.Nil(t, srcs[0].Info.Definition)
+ busybox := llb.Image("busybox:latest")
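+ // with a locked cache mount the two runs must not overlap: each one fails if
+ // the other's marker file shows up while it is still running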
+ st := busybox.Run(llb.Shlex(`sh -e -c "touch one; if [[ -f two ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f two ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd"))
+ st.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
- require.Equal(t, 1, len(srcs[0].Ranges))
- require.Equal(t, int32(9), srcs[0].Ranges[0].Start.Line)
- require.Equal(t, int32(0), srcs[0].Ranges[0].Start.Character)
-
- require.Equal(t, "bar", srcs[1].Info.Filename)
- require.Equal(t, []byte("data2"), srcs[1].Info.Data)
- require.Nil(t, srcs[1].Info.Definition)
+ st2 := busybox.Run(llb.Shlex(`sh -e -c "touch two; if [[ -f one ]]; then exit 0; fi; for i in $(seq 10); do if [[ -f one ]]; then exit 1; fi; usleep 200000; done"`), llb.Dir("/wd"))
+ st2.AddMount("/wd", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
- require.Equal(t, 1, len(srcs[1].Ranges))
- require.Equal(t, int32(8), srcs[1].Ranges[0].Start.Line)
- require.Equal(t, int32(0), srcs[1].Ranges[0].Start.Character)
+ out := busybox.Run(llb.Shlex("true"))
+ out.AddMount("/m1", st.Root())
+ out.AddMount("/m2", st2.Root())
- require.Equal(t, "foo", srcs[2].Info.Filename)
- require.Equal(t, []byte("data1"), srcs[2].Info.Data)
- require.Nil(t, srcs[2].Info.Definition)
+ def, err := out.Marshal(sb.Context())
+ require.NoError(t, err)
- require.Equal(t, 1, len(srcs[2].Ranges))
- require.Equal(t, int32(7), srcs[2].Ranges[0].Start.Line)
- require.Equal(t, int32(0), srcs[2].Ranges[0].Start.Character)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
}
-func testSourceMapFromRef(t *testing.T, sb integration.Sandbox) {
+func testDuplicateCacheMount(t *testing.T, sb integration.Sandbox) {
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- srcState := llb.Scratch().File(
- llb.Mkfile("foo", 0600, []byte("data")))
- sm := llb.NewSourceMap(&srcState, "bar", []byte("bardata"))
-
- frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
- st := llb.Scratch().File(
- llb.Mkdir("foo/bar", 0600), //fails because /foo doesn't exist
- sm.Location([]*pb.Range{{Start: pb.Position{Line: 3, Character: 1}}}),
- )
+ busybox := llb.Image("busybox:latest")
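+ // the same cache id mounted at two paths must resolve to one directory:
+ // a file created under /m1 has to be visible under /m2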
- def, err := st.Marshal(sb.Context())
- if err != nil {
- return nil, err
- }
+ out := busybox.Run(llb.Shlex(`sh -e -c "[[ ! -f /m2/foo ]]; touch /m1/foo; [[ -f /m2/foo ]];"`))
+ out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
+ out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
- res, err := c.Solve(ctx, gateway.SolveRequest{
- Definition: def.ToPB(),
- })
- if err != nil {
- return nil, err
- }
+ def, err := out.Marshal(sb.Context())
+ require.NoError(t, err)
- ref, err := res.SingleRef()
- if err != nil {
- return nil, err
- }
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
+}
- st2, err := ref.ToState()
- if err != nil {
- return nil, err
- }
+func testRunCacheWithMounts(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
- st = llb.Scratch().File(
- llb.Copy(st2, "foo", "foo2"),
- )
+ busybox := llb.Image("busybox:latest")
- def, err = st.Marshal(sb.Context())
- if err != nil {
- return nil, err
- }
+ out := busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/sbin/apk ]]"`))
+ out.AddMount("/m1", llb.Image("alpine:latest"), llb.Readonly)
- return c.Solve(ctx, gateway.SolveRequest{
- Definition: def.ToPB(),
- })
- }
+ def, err := out.Marshal(sb.Context())
+ require.NoError(t, err)
- _, err = c.Build(sb.Context(), SolveOpt{}, "", frontend, nil)
- require.Error(t, err)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
- srcs := errdefs.Sources(err)
- require.Equal(t, 1, len(srcs))
+ out = busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/sbin/apk ]]"`))
+ out.AddMount("/m1", llb.Image("busybox:latest"), llb.Readonly)
- require.Equal(t, "bar", srcs[0].Info.Filename)
- require.Equal(t, []byte("bardata"), srcs[0].Info.Data)
- require.NotNil(t, srcs[0].Info.Definition)
+ def, err = out.Marshal(sb.Context())
+ require.NoError(t, err)
- require.Equal(t, 1, len(srcs[0].Ranges))
- require.Equal(t, int32(3), srcs[0].Ranges[0].Start.Line)
- require.Equal(t, int32(1), srcs[0].Ranges[0].Start.Character)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.Error(t, err)
}
-func testRmSymlink(t *testing.T, sb integration.Sandbox) {
+func testCacheMountNoCache(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- // Test that if FileOp.Rm is called on a symlink, then
- // the symlink is removed rather than the target
- mnt := llb.Image("alpine").
- Run(llb.Shlex("touch /mnt/target")).
- AddMount("/mnt", llb.Scratch())
+ busybox := llb.Image("busybox:latest")
- mnt = llb.Image("alpine").
- Run(llb.Shlex("ln -s target /mnt/link")).
- AddMount("/mnt", mnt)
+ out := busybox.Run(llb.Shlex(`sh -e -c "touch /m1/foo; touch /m2/bar"`))
+ out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
+ out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache2", llb.CacheMountLocked))
- def, err := mnt.File(llb.Rm("link")).Marshal(sb.Context())
+ def, err := out.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
require.NoError(t, err)
- defer os.RemoveAll(destDir)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- },
- },
- }, nil)
+ out = busybox.Run(llb.Shlex(`sh -e -c "[[ ! -f /m1/foo ]]; touch /m1/foo2;"`), llb.IgnoreCache)
+ out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
+
+ def, err = out.Marshal(sb.Context())
require.NoError(t, err)
- require.NoError(t, fstest.CheckDirectoryEqualWithApplier(destDir, fstest.CreateFile("target", nil, 0644)))
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
+
+ out = busybox.Run(llb.Shlex(`sh -e -c "[[ -f /m1/foo2 ]]; [[ -f /m2/bar ]];"`))
+ out.AddMount("/m1", llb.Scratch(), llb.AsPersistentCacheDir("mycache1", llb.CacheMountLocked))
+ out.AddMount("/m2", llb.Scratch(), llb.AsPersistentCacheDir("mycache2", llb.CacheMountLocked))
+
+ def, err = out.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
}
-func testProxyEnv(t *testing.T, sb integration.Sandbox) {
+func testCopyFromEmptyImage(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- base := llb.Image("docker.io/library/busybox:latest").Dir("/out")
- cmd := `sh -c "echo -n $HTTP_PROXY-$HTTPS_PROXY-$NO_PROXY-$no_proxy-$ALL_PROXY-$all_proxy > env"`
+ for _, image := range []llb.State{llb.Scratch(), llb.Image("tonistiigi/test:nolayers")} {
+ st := llb.Scratch().File(llb.Copy(image, "/", "/"))
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
- st := base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{
- HTTPProxy: "httpvalue",
- HTTPSProxy: "httpsvalue",
- NoProxy: "noproxyvalue",
- AllProxy: "allproxyvalue",
- }))
- out := st.AddMount("/out", llb.Scratch())
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
- def, err := out.Marshal(sb.Context())
+ st = llb.Scratch().File(llb.Copy(image, "/foo", "/"))
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "/foo: no such file or directory")
+
+ busybox := llb.Image("busybox:latest")
+
+ out := busybox.Run(llb.Shlex(`sh -e -c '[ $(ls /scratch | wc -l) = '0' ]'`))
+ out.AddMount("/scratch", image, llb.Readonly)
+
+ def, err = out.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
+ }
+}
+
+// containerd/containerd#2119
+func testDuplicateWhiteouts(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
+
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+ }
+
+ run(`sh -e -c "mkdir -p d0 d1; echo -n first > d1/bar;"`)
+ run(`sh -c "rm -rf d0 d1"`)
+
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
+ destDir := t.TempDir()
+
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
require.NoError(t, err)
- defer os.RemoveAll(destDir)
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDir,
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
},
},
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "env"))
+ dt, err := os.ReadFile(out)
require.NoError(t, err)
- require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue-allproxyvalue-allproxyvalue")
-
- // repeat to make sure proxy doesn't change cache
- st = base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{
- HTTPSProxy: "httpsvalue2",
- NoProxy: "noproxyvalue2",
- }))
- out = st.AddMount("/out", llb.Scratch())
- def, err = out.Marshal(sb.Context())
+ m, err := testutil.ReadTarToMap(dt, false)
require.NoError(t, err)
- destDir, err = ioutil.TempDir("", "buildkit")
+ var index ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &index)
require.NoError(t, err)
- defer os.RemoveAll(destDir)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- },
- },
- }, nil)
+ var mfst ocispecs.Manifest
+ err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "env"))
+ lastLayer := mfst.Layers[len(mfst.Layers)-1]
+
+ layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()]
+ require.True(t, ok)
+
+ m, err = testutil.ReadTarToMap(layer.Data, true)
require.NoError(t, err)
- require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue-allproxyvalue-allproxyvalue")
+
+ _, ok = m[".wh.d0"]
+ require.True(t, ok)
+
+ _, ok = m[".wh.d1"]
+ require.True(t, ok)
+
+ // check for a bug that added whiteout for subfile
+ _, ok = m["d1/.wh.bar"]
+ require.True(t, !ok)
}
-func testMergeOp(t *testing.T, sb integration.Sandbox) {
+// #276
+func testWhiteoutParentDir(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
requiresLinux(t)
-
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- ctx := sb.Context()
- registry, err := sb.NewRegistry()
- if !errors.Is(err, integration.ErrRequirements) {
- require.NoError(t, err)
- }
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
- var imageTarget string
- if os.Getenv("TEST_DOCKERD") == "1" {
- // do image export but use a fake url as the image should just end up in moby's
- // local store
- imageTarget = "fake.invalid:33333/buildkit/testmergeop:latest"
- } else if registry != "" {
- imageTarget = registry + "/buildkit/testmergeop:latest"
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
}
- stateA := llb.Scratch().
- File(llb.Mkfile("/foo", 0755, []byte("A"))).
- File(llb.Mkfile("/a", 0755, []byte("A"))).
- File(llb.Mkdir("/bar", 0700)).
- File(llb.Mkfile("/bar/A", 0755, []byte("A")))
- stateB := stateA.
- File(llb.Rm("/foo")).
- File(llb.Mkfile("/b", 0755, []byte("B"))).
- File(llb.Mkfile("/bar/B", 0754, []byte("B")))
- stateC := llb.Scratch().
- File(llb.Mkfile("/foo", 0755, []byte("C"))).
- File(llb.Mkfile("/c", 0755, []byte("C"))).
- File(llb.Mkdir("/bar", 0755)).
- File(llb.Mkfile("/bar/A", 0400, []byte("C")))
+ run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`)
+ run(`rm foo/bar`)
- mergeA := llb.Merge([]llb.State{stateA, stateC})
- requireContents(ctx, t, c, sb, mergeA, nil, nil, imageTarget,
- fstest.CreateFile("foo", []byte("C"), 0755),
- fstest.CreateFile("c", []byte("C"), 0755),
- fstest.CreateDir("bar", 0755),
- fstest.CreateFile("bar/A", []byte("C"), 0400),
- fstest.CreateFile("a", []byte("A"), 0755),
- )
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
- mergeB := llb.Merge([]llb.State{stateC, stateB})
- requireContents(ctx, t, c, sb, mergeB, nil, nil, imageTarget,
- fstest.CreateFile("a", []byte("A"), 0755),
- fstest.CreateFile("b", []byte("B"), 0755),
- fstest.CreateFile("c", []byte("C"), 0755),
- fstest.CreateDir("bar", 0700),
- fstest.CreateFile("bar/A", []byte("A"), 0755),
- fstest.CreateFile("bar/B", []byte("B"), 0754),
- )
+ destDir := t.TempDir()
- stateD := llb.Scratch().File(llb.Mkdir("/qaz", 0755))
- mergeC := llb.Merge([]llb.State{mergeA, mergeB, stateD})
- requireContents(ctx, t, c, sb, mergeC, nil, nil, imageTarget,
- fstest.CreateFile("a", []byte("A"), 0755),
- fstest.CreateFile("b", []byte("B"), 0755),
- fstest.CreateFile("c", []byte("C"), 0755),
- fstest.CreateDir("bar", 0700),
- fstest.CreateFile("bar/A", []byte("A"), 0755),
- fstest.CreateFile("bar/B", []byte("B"), 0754),
- fstest.CreateDir("qaz", 0755),
- )
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
- runA := runShell(llb.Merge([]llb.State{llb.Image("alpine"), mergeC}),
- // turn /a file into a dir, mv b and c into it
- "rm /a",
- "mkdir /a",
- "mv /b /c /a/",
- // remove+recreate /bar to make it opaque on overlay snapshotters
- "rm -rf /bar",
- "mkdir -m 0755 /bar",
- "echo -n D > /bar/D",
- // turn /qaz dir into a file
- "rm -rf /qaz",
- "touch /qaz",
- )
- stateE := llb.Scratch().
- File(llb.Mkfile("/foo", 0755, []byte("E"))).
- File(llb.Mkdir("/bar", 0755)).
- File(llb.Mkfile("/bar/A", 0755, []byte("A"))).
- File(llb.Mkfile("/bar/E", 0755, nil))
- mergeD := llb.Merge([]llb.State{stateE, runA})
- requireEqualContents(ctx, t, c, mergeD, llb.Image("alpine").
- File(llb.Mkdir("a", 0755)).
- File(llb.Mkfile("a/b", 0755, []byte("B"))).
- File(llb.Mkfile("a/c", 0755, []byte("C"))).
- File(llb.Mkdir("bar", 0755)).
- File(llb.Mkfile("bar/D", 0644, []byte("D"))).
- File(llb.Mkfile("bar/E", 0755, nil)).
- File(llb.Mkfile("qaz", 0644, nil)),
- // /foo from stateE is not here because it is deleted in stateB, which is part of a submerge of mergeD
- )
-}
+ dt, err := os.ReadFile(out)
+ require.NoError(t, err)
-func testMergeOpCacheInline(t *testing.T, sb integration.Sandbox) {
- testMergeOpCache(t, sb, "inline")
-}
+ m, err := testutil.ReadTarToMap(dt, false)
+ require.NoError(t, err)
-func testMergeOpCacheMin(t *testing.T, sb integration.Sandbox) {
- testMergeOpCache(t, sb, "min")
-}
+ var index ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &index)
+ require.NoError(t, err)
-func testMergeOpCacheMax(t *testing.T, sb integration.Sandbox) {
- testMergeOpCache(t, sb, "max")
-}
+ var mfst ocispecs.Manifest
+ err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
+ require.NoError(t, err)
-func testMergeOpCache(t *testing.T, sb integration.Sandbox, mode string) {
- t.Helper()
- skipDockerd(t, sb)
- requiresLinux(t)
+ lastLayer := mfst.Layers[len(mfst.Layers)-1]
- cdAddress := sb.ContainerdAddress()
- if cdAddress == "" {
- t.Skip("test requires containerd worker")
- }
+ layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()]
+ require.True(t, ok)
- client, err := newContainerd(cdAddress)
+ m, err = testutil.ReadTarToMap(layer.Data, true)
require.NoError(t, err)
- defer client.Close()
- ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+ _, ok = m["foo/.wh.bar"]
+ require.True(t, ok)
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
- require.NoError(t, err)
+ _, ok = m["foo/"]
+ require.True(t, ok)
+}
+// #2490
+func testMoveParentDir(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- // push the busybox image to the mutable registry
- sourceImage := "busybox:latest"
- def, err := llb.Image(sourceImage).Marshal(sb.Context())
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
+
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
+ }
+
+ run(`sh -c "mkdir -p foo; echo -n first > foo/bar;"`)
+ run(`mv foo foo2`)
+
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- busyboxTargetNoTag := registry + "/buildkit/testlazyimage:"
- busyboxTarget := busyboxTargetNoTag + "latest"
+ destDir := t.TempDir()
+
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": busyboxTarget,
- "push": "true",
- },
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
},
},
}, nil)
require.NoError(t, err)
- imageService := client.ImageService()
- contentStore := client.ContentStore()
+ dt, err := os.ReadFile(out)
+ require.NoError(t, err)
- busyboxImg, err := imageService.Get(ctx, busyboxTarget)
+ m, err := testutil.ReadTarToMap(dt, false)
require.NoError(t, err)
- busyboxManifest, err := images.Manifest(ctx, contentStore, busyboxImg.Target, nil)
+ var index ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &index)
require.NoError(t, err)
- for _, layer := range busyboxManifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.NoError(t, err)
- }
+ var mfst ocispecs.Manifest
+ err = json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst)
+ require.NoError(t, err)
- // clear all local state out
- err = imageService.Delete(ctx, busyboxImg.Name, images.SynchronousDelete())
+ lastLayer := mfst.Layers[len(mfst.Layers)-1]
+
+ layer, ok := m["blobs/sha256/"+lastLayer.Digest.Hex()]
+ require.True(t, ok)
+
+ m, err = testutil.ReadTarToMap(layer.Data, true)
require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
- for _, layer := range busyboxManifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
- }
+ _, ok = m[".wh.foo"]
+ require.True(t, ok)
- // make a new merge that includes the lazy busybox as a base and exports inline cache
- input1 := llb.Scratch().
- File(llb.Mkdir("/dir", 0777)).
- File(llb.Mkfile("/dir/1", 0777, nil))
- input1Copy := llb.Scratch().File(llb.Copy(input1, "/dir/1", "/foo/1", &llb.CopyInfo{CreateDestPath: true}))
+ _, ok = m["foo2/"]
+ require.True(t, ok)
- // put random contents in the file to ensure it's not re-run later
- input2 := runShell(llb.Image("alpine:latest"),
- "mkdir /dir",
- "cat /dev/urandom | head -c 100 | sha256sum > /dir/2")
- input2Copy := llb.Scratch().File(llb.Copy(input2, "/dir/2", "/bar/2", &llb.CopyInfo{CreateDestPath: true}))
+ _, ok = m["foo2/bar"]
+ require.True(t, ok)
+}
- merge := llb.Merge([]llb.State{llb.Image(busyboxTarget), input1Copy, input2Copy})
+// #296
+func testSchema1Image(t *testing.T, sb integration.Sandbox) {
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
- def, err = merge.Marshal(sb.Context())
+ st := llb.Image("gcr.io/google_containers/pause:3.0@sha256:0d093c962a6c2dd8bb8727b661e2b5f13e9df884af9945b4cc7088d9350cd3ee")
+
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- target := registry + "/buildkit/testmerge:latest"
- cacheTarget := registry + "/buildkit/testmergecache:latest"
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
- var cacheExports []CacheOptionsEntry
- var cacheImports []CacheOptionsEntry
- switch mode {
- case "inline":
- cacheExports = []CacheOptionsEntry{{
- Type: "inline",
- }}
- cacheImports = []CacheOptionsEntry{{
- Type: "registry",
- Attrs: map[string]string{
- "ref": target,
- },
- }}
- case "min":
- cacheExports = []CacheOptionsEntry{{
- Type: "registry",
- Attrs: map[string]string{
- "ref": cacheTarget,
- },
- }}
- cacheImports = []CacheOptionsEntry{{
- Type: "registry",
- Attrs: map[string]string{
- "ref": cacheTarget,
+ checkAllReleasable(t, c, sb, true)
+}
+
+// #319
+func testMountWithNoSource(t *testing.T, sb integration.Sandbox) {
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ busybox := llb.Image("docker.io/library/busybox:latest")
+ st := llb.Scratch()
+
+ var nilState llb.State
+
+ // This should never actually be run, but we want to succeed
+ // if it was, because we expect an error below, or a daemon
+ // panic if the issue has regressed.
+ run := busybox.Run(
+ llb.Args([]string{"/bin/true"}),
+ llb.AddMount("/nil", nilState, llb.SourcePath("/"), llb.Readonly))
+
+ st = run.AddMount("/mnt", st)
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
+
+ checkAllReleasable(t, c, sb, true)
+}
+
+// #324
+func testReadonlyRootFS(t *testing.T, sb integration.Sandbox) {
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ busybox := llb.Image("docker.io/library/busybox:latest")
+ st := llb.Scratch()
+
+ // The path /foo should be unwriteable.
+ run := busybox.Run(
+ llb.ReadonlyRootFS(),
+ llb.Args([]string{"/bin/touch", "/foo"}))
+ st = run.AddMount("/mnt", st)
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.Error(t, err)
+ // Would prefer to detect more specifically "Read-only file
+ // system" but that isn't exposed here (it is on the stdio
+ // which we don't see).
+ require.Contains(t, err.Error(), "process \"/bin/touch /foo\" did not complete successfully")
+
+ checkAllReleasable(t, c, sb, true)
+}
+
+func testSourceMap(t *testing.T, sb integration.Sandbox) {
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ sm1 := llb.NewSourceMap(nil, "foo", []byte("data1"))
+ sm2 := llb.NewSourceMap(nil, "bar", []byte("data2"))
+
+ st := llb.Scratch().Run(
+ llb.Shlex("not-exist"),
+ sm1.Location([]*pb.Range{{Start: pb.Position{Line: 7}}}),
+ sm2.Location([]*pb.Range{{Start: pb.Position{Line: 8}}}),
+ sm1.Location([]*pb.Range{{Start: pb.Position{Line: 9}}}),
+ )
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.Error(t, err)
+
+ srcs := errdefs.Sources(err)
+ require.Equal(t, 3, len(srcs))
+
+ // Source errors are wrapped in the order provided as llb.ConstraintOpts, so
+ // when they are unwrapped, the first unwrapped error is the last location
+ // provided.
+ require.Equal(t, "foo", srcs[0].Info.Filename)
+ require.Equal(t, []byte("data1"), srcs[0].Info.Data)
+ require.Nil(t, srcs[0].Info.Definition)
+
+ require.Equal(t, 1, len(srcs[0].Ranges))
+ require.Equal(t, int32(9), srcs[0].Ranges[0].Start.Line)
+ require.Equal(t, int32(0), srcs[0].Ranges[0].Start.Character)
+
+ require.Equal(t, "bar", srcs[1].Info.Filename)
+ require.Equal(t, []byte("data2"), srcs[1].Info.Data)
+ require.Nil(t, srcs[1].Info.Definition)
+
+ require.Equal(t, 1, len(srcs[1].Ranges))
+ require.Equal(t, int32(8), srcs[1].Ranges[0].Start.Line)
+ require.Equal(t, int32(0), srcs[1].Ranges[0].Start.Character)
+
+ require.Equal(t, "foo", srcs[2].Info.Filename)
+ require.Equal(t, []byte("data1"), srcs[2].Info.Data)
+ require.Nil(t, srcs[2].Info.Definition)
+
+ require.Equal(t, 1, len(srcs[2].Ranges))
+ require.Equal(t, int32(7), srcs[2].Ranges[0].Start.Line)
+ require.Equal(t, int32(0), srcs[2].Ranges[0].Start.Character)
+}
+
+func testSourceMapFromRef(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ srcState := llb.Scratch().File(
+ llb.Mkfile("foo", 0600, []byte("data")))
+ sm := llb.NewSourceMap(&srcState, "bar", []byte("bardata"))
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ st := llb.Scratch().File(
+ llb.Mkdir("foo/bar", 0600), //fails because /foo doesn't exist
+ sm.Location([]*pb.Range{{Start: pb.Position{Line: 3, Character: 1}}}),
+ )
+
+ def, err := st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ ref, err := res.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+
+ st2, err := ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+
+ st = llb.Scratch().File(
+ llb.Copy(st2, "foo", "foo2"),
+ )
+
+ def, err = st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
+
+ return c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ }
+
+ _, err = c.Build(sb.Context(), SolveOpt{}, "", frontend, nil)
+ require.Error(t, err)
+
+ srcs := errdefs.Sources(err)
+ require.Equal(t, 1, len(srcs))
+
+ require.Equal(t, "bar", srcs[0].Info.Filename)
+ require.Equal(t, []byte("bardata"), srcs[0].Info.Data)
+ require.NotNil(t, srcs[0].Info.Definition)
+
+ require.Equal(t, 1, len(srcs[0].Ranges))
+ require.Equal(t, int32(3), srcs[0].Ranges[0].Start.Line)
+ require.Equal(t, int32(1), srcs[0].Ranges[0].Start.Character)
+}
+
+func testRmSymlink(t *testing.T, sb integration.Sandbox) {
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ // Test that if FileOp.Rm is called on a symlink, then
+ // the symlink is removed rather than the target
+ mnt := llb.Image("alpine").
+ Run(llb.Shlex("touch /mnt/target")).
+ AddMount("/mnt", llb.Scratch())
+
+ mnt = llb.Image("alpine").
+ Run(llb.Shlex("ln -s target /mnt/link")).
+ AddMount("/mnt", mnt)
+
+ def, err := mnt.File(llb.Rm("link")).Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ require.NoError(t, fstest.CheckDirectoryEqualWithApplier(destDir, fstest.CreateFile("target", nil, 0644)))
+}
+
+func testProxyEnv(t *testing.T, sb integration.Sandbox) {
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ base := llb.Image("docker.io/library/busybox:latest").Dir("/out")
+ cmd := `sh -c "echo -n $HTTP_PROXY-$HTTPS_PROXY-$NO_PROXY-$no_proxy-$ALL_PROXY-$all_proxy > env"`
+
+ st := base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{
+ HTTPProxy: "httpvalue",
+ HTTPSProxy: "httpsvalue",
+ NoProxy: "noproxyvalue",
+ AllProxy: "allproxyvalue",
+ }))
+ out := st.AddMount("/out", llb.Scratch())
+
+ def, err := out.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "env"))
+ require.NoError(t, err)
+ require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue-allproxyvalue-allproxyvalue")
+
+ // repeat to make sure proxy doesn't change cache
+ st = base.Run(llb.Shlex(cmd), llb.WithProxy(llb.ProxyEnv{
+ HTTPSProxy: "httpsvalue2",
+ NoProxy: "noproxyvalue2",
+ }))
+ out = st.AddMount("/out", llb.Scratch())
+
+ def, err = out.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir = t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "env"))
+ require.NoError(t, err)
+ require.Equal(t, string(dt), "httpvalue-httpsvalue-noproxyvalue-noproxyvalue-allproxyvalue-allproxyvalue")
+}
+
+func testMergeOp(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ ctx := sb.Context()
+ registry, err := sb.NewRegistry()
+ if !errors.Is(err, integration.ErrRequirements) {
+ require.NoError(t, err)
+ }
+
+ var imageTarget string
+ if integration.IsTestDockerdMoby(sb) {
+ // do image export but use a fake url as the image should just end up in moby's
+ // local store
+ imageTarget = "fake.invalid:33333/buildkit/testmergeop:latest"
+ } else if registry != "" {
+ imageTarget = registry + "/buildkit/testmergeop:latest"
+ }
+
+ stateA := llb.Scratch().
+ File(llb.Mkfile("/foo", 0755, []byte("A"))).
+ File(llb.Mkfile("/a", 0755, []byte("A"))).
+ File(llb.Mkdir("/bar", 0700)).
+ File(llb.Mkfile("/bar/A", 0755, []byte("A")))
+ stateB := stateA.
+ File(llb.Rm("/foo")).
+ File(llb.Mkfile("/b", 0755, []byte("B"))).
+ File(llb.Mkfile("/bar/B", 0754, []byte("B")))
+ stateC := llb.Scratch().
+ File(llb.Mkfile("/foo", 0755, []byte("C"))).
+ File(llb.Mkfile("/c", 0755, []byte("C"))).
+ File(llb.Mkdir("/bar", 0755)).
+ File(llb.Mkfile("/bar/A", 0400, []byte("C")))
+
+ mergeA := llb.Merge([]llb.State{stateA, stateC})
+ requireContents(ctx, t, c, sb, mergeA, nil, nil, imageTarget,
+ fstest.CreateFile("foo", []byte("C"), 0755),
+ fstest.CreateFile("c", []byte("C"), 0755),
+ fstest.CreateDir("bar", 0755),
+ fstest.CreateFile("bar/A", []byte("C"), 0400),
+ fstest.CreateFile("a", []byte("A"), 0755),
+ )
+
+ mergeB := llb.Merge([]llb.State{stateC, stateB})
+ requireContents(ctx, t, c, sb, mergeB, nil, nil, imageTarget,
+ fstest.CreateFile("a", []byte("A"), 0755),
+ fstest.CreateFile("b", []byte("B"), 0755),
+ fstest.CreateFile("c", []byte("C"), 0755),
+ fstest.CreateDir("bar", 0700),
+ fstest.CreateFile("bar/A", []byte("A"), 0755),
+ fstest.CreateFile("bar/B", []byte("B"), 0754),
+ )
+
+ stateD := llb.Scratch().File(llb.Mkdir("/qaz", 0755))
+ mergeC := llb.Merge([]llb.State{mergeA, mergeB, stateD})
+ requireContents(ctx, t, c, sb, mergeC, nil, nil, imageTarget,
+ fstest.CreateFile("a", []byte("A"), 0755),
+ fstest.CreateFile("b", []byte("B"), 0755),
+ fstest.CreateFile("c", []byte("C"), 0755),
+ fstest.CreateDir("bar", 0700),
+ fstest.CreateFile("bar/A", []byte("A"), 0755),
+ fstest.CreateFile("bar/B", []byte("B"), 0754),
+ fstest.CreateDir("qaz", 0755),
+ )
+
+ runA := runShell(llb.Merge([]llb.State{llb.Image("alpine"), mergeC}),
+ // turn /a file into a dir, mv b and c into it
+ "rm /a",
+ "mkdir /a",
+ "mv /b /c /a/",
+ // remove+recreate /bar to make it opaque on overlay snapshotters
+ "rm -rf /bar",
+ "mkdir -m 0755 /bar",
+ "echo -n D > /bar/D",
+ // turn /qaz dir into a file
+ "rm -rf /qaz",
+ "touch /qaz",
+ )
+ stateE := llb.Scratch().
+ File(llb.Mkfile("/foo", 0755, []byte("E"))).
+ File(llb.Mkdir("/bar", 0755)).
+ File(llb.Mkfile("/bar/A", 0755, []byte("A"))).
+ File(llb.Mkfile("/bar/E", 0755, nil))
+ mergeD := llb.Merge([]llb.State{stateE, runA})
+ requireEqualContents(ctx, t, c, mergeD, llb.Image("alpine").
+ File(llb.Mkdir("a", 0755)).
+ File(llb.Mkfile("a/b", 0755, []byte("B"))).
+ File(llb.Mkfile("a/c", 0755, []byte("C"))).
+ File(llb.Mkdir("bar", 0755)).
+ File(llb.Mkfile("bar/D", 0644, []byte("D"))).
+ File(llb.Mkfile("bar/E", 0755, nil)).
+ File(llb.Mkfile("qaz", 0644, nil)),
+ // /foo from stateE is not here because it is deleted in stateB, which is part of a submerge of mergeD
+ )
+}
+
+func testMergeOpCacheInline(t *testing.T, sb integration.Sandbox) {
+ testMergeOpCache(t, sb, "inline")
+}
+
+func testMergeOpCacheMin(t *testing.T, sb integration.Sandbox) {
+ testMergeOpCache(t, sb, "min")
+}
+
+func testMergeOpCacheMax(t *testing.T, sb integration.Sandbox) {
+ testMergeOpCache(t, sb, "max")
+}
+
+func testMergeOpCache(t *testing.T, sb integration.Sandbox, mode string) {
+ t.Helper()
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" {
+ t.Skip("test requires containerd worker")
+ }
+
+ client, err := newContainerd(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ // push the busybox image to the mutable registry
+ sourceImage := "busybox:latest"
+ def, err := llb.Image(sourceImage).Marshal(sb.Context())
+ require.NoError(t, err)
+
+ busyboxTargetNoTag := registry + "/buildkit/testlazyimage:"
+ busyboxTarget := busyboxTargetNoTag + "latest"
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": busyboxTarget,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ imageService := client.ImageService()
+ contentStore := client.ContentStore()
+
+ busyboxImg, err := imageService.Get(ctx, busyboxTarget)
+ require.NoError(t, err)
+
+ busyboxManifest, err := images.Manifest(ctx, contentStore, busyboxImg.Target, nil)
+ require.NoError(t, err)
+
+ for _, layer := range busyboxManifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.NoError(t, err)
+ }
+
+ // clear all local state out
+ err = imageService.Delete(ctx, busyboxImg.Name, images.SynchronousDelete())
+ require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
+
+ for _, layer := range busyboxManifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ }
+
+ // make a new merge that includes the lazy busybox as a base and exports inline cache
+ input1 := llb.Scratch().
+ File(llb.Mkdir("/dir", 0777)).
+ File(llb.Mkfile("/dir/1", 0777, nil))
+ input1Copy := llb.Scratch().File(llb.Copy(input1, "/dir/1", "/foo/1", &llb.CopyInfo{CreateDestPath: true}))
+
+ // put random contents in the file to ensure it's not re-run later
+ input2 := runShell(llb.Image("alpine:latest"),
+ "mkdir /dir",
+ "cat /dev/urandom | head -c 100 | sha256sum > /dir/2")
+ input2Copy := llb.Scratch().File(llb.Copy(input2, "/dir/2", "/bar/2", &llb.CopyInfo{CreateDestPath: true}))
+
+ merge := llb.Merge([]llb.State{llb.Image(busyboxTarget), input1Copy, input2Copy})
+
+ def, err = merge.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ target := registry + "/buildkit/testmerge:latest"
+ cacheTarget := registry + "/buildkit/testmergecache:latest"
+
+ var cacheExports []CacheOptionsEntry
+ var cacheImports []CacheOptionsEntry
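+	// choose cache export/import settings for this mode: "inline" embeds cache
+	// metadata in the exported image and imports it back from the pushed image,
+	// while "min"/"max" use a separate registry cache ref ("max" also exports
+	// cache for intermediate layers)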
+ switch mode {
+ case "inline":
+ cacheExports = []CacheOptionsEntry{{
+ Type: "inline",
+ }}
+ cacheImports = []CacheOptionsEntry{{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": target,
+ },
+ }}
+ case "min":
+ cacheExports = []CacheOptionsEntry{{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": cacheTarget,
+ },
+ }}
+ cacheImports = []CacheOptionsEntry{{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": cacheTarget,
+ },
+ }}
+ case "max":
+ cacheExports = []CacheOptionsEntry{{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": cacheTarget,
+ "mode": "max",
+ },
+ }}
+ cacheImports = []CacheOptionsEntry{{
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": cacheTarget,
+ },
+ }}
+ default:
+		require.Failf(t, "unknown cache mode", "%s", mode)
+ }
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ },
+ },
+ CacheExports: cacheExports,
+ }, nil)
+ require.NoError(t, err)
+
+ // verify that the busybox image stayed lazy
+ for _, layer := range busyboxManifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ }
+
+ // get the random value at /bar/2
+ destDir := t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ bar2Contents, err := os.ReadFile(filepath.Join(destDir, "bar", "2"))
+ require.NoError(t, err)
+
+ // clear all local state out
+ img, err := imageService.Get(ctx, target)
+ require.NoError(t, err)
+
+ manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
+ require.NoError(t, err)
+
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
+
+ for _, layer := range manifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ }
+
+ // re-run the same build with cache imports and verify everything stays lazy
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{{
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ }},
+ CacheImports: cacheImports,
+ CacheExports: cacheExports,
+ }, nil)
+ require.NoError(t, err)
+
+ // verify everything from before stayed lazy
+ img, err = imageService.Get(ctx, target)
+ require.NoError(t, err)
+
+ manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
+ require.NoError(t, err)
+
+ for i, layer := range manifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i)
+ }
+
+ // re-run the build with a change only to input1 using the remote cache
+ input1 = llb.Scratch().
+ File(llb.Mkdir("/dir", 0777)).
+ File(llb.Mkfile("/dir/1", 0444, nil))
+ input1Copy = llb.Scratch().File(llb.Copy(input1, "/dir/1", "/foo/1", &llb.CopyInfo{CreateDestPath: true}))
+
+ merge = llb.Merge([]llb.State{llb.Image(busyboxTarget), input1Copy, input2Copy})
+
+ def, err = merge.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{{
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ }},
+ CacheExports: cacheExports,
+ CacheImports: cacheImports,
+ }, nil)
+ require.NoError(t, err)
+
+ // verify everything from before stayed lazy except the middle layer for input1Copy
+ img, err = imageService.Get(ctx, target)
+ require.NoError(t, err)
+
+ manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
+ require.NoError(t, err)
+
+ for i, layer := range manifest.Layers {
+ switch i {
+ case 0, 2:
+ // bottom and top layer should stay lazy as they didn't change
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i)
+ case 1:
+ // middle layer had to be rebuilt, should exist locally
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.NoError(t, err)
+ default:
+			require.Failf(t, "unexpected layer index", "%d", i)
+ }
+ }
+
+ // check the random value at /bar/2 didn't change
+ destDir = t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ CacheImports: cacheImports,
+ }, nil)
+ require.NoError(t, err)
+
+ newBar2Contents, err := os.ReadFile(filepath.Join(destDir, "bar", "2"))
+ require.NoError(t, err)
+
+ require.Equalf(t, bar2Contents, newBar2Contents, "bar/2 contents changed")
+
+ // Now test the case with a layer on top of a merge.
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
+
+ for _, layer := range manifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ }
+
+ mergePlusLayer := merge.File(llb.Mkfile("/3", 0444, nil))
+
+ def, err = mergePlusLayer.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{{
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ }},
+ CacheExports: cacheExports,
+ CacheImports: cacheImports,
+ }, nil)
+ require.NoError(t, err)
+
+ // check the random value at /bar/2 didn't change
+ destDir = t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ CacheImports: cacheImports,
+ }, nil)
+ require.NoError(t, err)
+
+ newBar2Contents, err = os.ReadFile(filepath.Join(destDir, "bar", "2"))
+ require.NoError(t, err)
+
+ require.Equalf(t, bar2Contents, newBar2Contents, "bar/2 contents changed")
+
+ // clear local state, repeat the build, verify everything stays lazy
+ err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ require.NoError(t, err)
+ checkAllReleasable(t, c, sb, true)
+
+ for _, layer := range manifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ }
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{{
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "store": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
+ },
+ }},
+ CacheImports: cacheImports,
+ CacheExports: cacheExports,
+ }, nil)
+ require.NoError(t, err)
+
+ img, err = imageService.Get(ctx, target)
+ require.NoError(t, err)
+
+ manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
+ require.NoError(t, err)
+
+ for i, layer := range manifest.Layers {
+ _, err = contentStore.Info(ctx, layer.Digest)
+ require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i)
+ }
+}
+
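+// requireContents exports the state to a local directory and asserts it matches the
+// given fstest appliers. If imageTarget is set, it additionally exports the state as an
+// image (pushed to a registry, or stored via the moby exporter on dockerd), resets
+// local state, and re-verifies the contents by pulling that image back.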
+func requireContents(ctx context.Context, t *testing.T, c *Client, sb integration.Sandbox, state llb.State, cacheImports, cacheExports []CacheOptionsEntry, imageTarget string, files ...fstest.Applier) {
+ t.Helper()
+
+ def, err := state.Marshal(ctx)
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ _, err = c.Solve(ctx, def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ CacheImports: cacheImports,
+ CacheExports: cacheExports,
+ }, nil)
+ require.NoError(t, err)
+
+ require.NoError(t, fstest.CheckDirectoryEqualWithApplier(destDir, fstest.Apply(files...)))
+
+ if imageTarget != "" {
+ var exports []ExportEntry
+ if integration.IsTestDockerdMoby(sb) {
+ exports = []ExportEntry{{
+ Type: "moby",
+ Attrs: map[string]string{
+ "name": imageTarget,
+ },
+ }}
+ } else {
+ exports = []ExportEntry{{
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": imageTarget,
+ "push": "true",
+ },
+ }}
+ }
+
+ _, err = c.Solve(ctx, def, SolveOpt{Exports: exports, CacheImports: cacheImports, CacheExports: cacheExports}, nil)
+ require.NoError(t, err)
+ resetState(t, c, sb)
+ requireContents(ctx, t, c, sb, llb.Image(imageTarget, llb.ResolveModePreferLocal), cacheImports, nil, "", files...)
+ }
+}
+
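+// requireEqualContents exports both states to local directories and asserts that the
+// resulting filesystem contents are identical.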
+func requireEqualContents(ctx context.Context, t *testing.T, c *Client, stateA, stateB llb.State) {
+ t.Helper()
+
+ defA, err := stateA.Marshal(ctx)
+ require.NoError(t, err)
+
+ destDirA := t.TempDir()
+
+ _, err = c.Solve(ctx, defA, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDirA,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ defB, err := stateB.Marshal(ctx)
+ require.NoError(t, err)
+
+ destDirB := t.TempDir()
+
+ _, err = c.Solve(ctx, defB, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDirB,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ require.NoError(t, fstest.CheckDirectoryEqual(destDirA, destDirB))
+}
+
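+// runShellExecState runs the given commands joined with "&&" in a single `sh -c`
+// invocation on top of base.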
+func runShellExecState(base llb.State, cmds ...string) llb.ExecState {
+ return base.Run(llb.Args([]string{"sh", "-c", strings.Join(cmds, " && ")}))
+}
+
+func runShell(base llb.State, cmds ...string) llb.State {
+ return runShellExecState(base, cmds...).Root()
+}
+
+func chainRunShells(base llb.State, cmdss ...[]string) llb.State {
+ for _, cmds := range cmdss {
+ base = runShell(base, cmds...)
+ }
+ return base
+}
+
+func requiresLinux(t *testing.T) {
+ if runtime.GOOS != "linux" {
+ t.Skipf("unsupported GOOS: %s", runtime.GOOS)
+ }
+}
+
+// ensurePruneAll tries to ensure Prune completes by retrying it.
+// The current cache implementation defers release-related logic to a goroutine, so
+// there can be a situation where a build has finished but the following prune doesn't
+// clean up the cache because some records still haven't been released.
+func ensurePruneAll(t *testing.T, c *Client, sb integration.Sandbox) {
+ for i := 0; i < 2; i++ {
+ require.NoError(t, c.Prune(sb.Context(), nil, PruneAll))
+ for j := 0; j < 20; j++ {
+ du, err := c.DiskUsage(sb.Context())
+ require.NoError(t, err)
+ if len(du) == 0 {
+ return
+ }
+ time.Sleep(500 * time.Millisecond)
+ }
+ t.Logf("retrying prune(%d)", i)
+ }
+ t.Fatalf("failed to ensure prune")
+}
+
+func checkAllReleasable(t *testing.T, c *Client, sb integration.Sandbox, checkContent bool) {
+ cl, err := c.ControlClient().ListenBuildHistory(sb.Context(), &controlapi.BuildHistoryRequest{
+ EarlyExit: true,
+ })
+ require.NoError(t, err)
+
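+	// delete all build history records first so they don't keep references to
+	// cache that would prevent it from being released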
+ for {
+ resp, err := cl.Recv()
+ if err == io.EOF {
+ break
+ }
+ require.NoError(t, err)
+ _, err = c.ControlClient().UpdateBuildHistory(sb.Context(), &controlapi.UpdateBuildHistoryRequest{
+ Ref: resp.Record.Ref,
+ Delete: true,
+ })
+ require.NoError(t, err)
+ }
+
+ retries := 0
+loop0:
+ for {
+ require.True(t, 20 > retries)
+ retries++
+ du, err := c.DiskUsage(sb.Context())
+ require.NoError(t, err)
+ for _, d := range du {
+ if d.InUse {
+ time.Sleep(500 * time.Millisecond)
+ continue loop0
+ }
+ }
+ break
+ }
+
+ err = c.Prune(sb.Context(), nil, PruneAll)
+ require.NoError(t, err)
+
+ du, err := c.DiskUsage(sb.Context())
+ require.NoError(t, err)
+ require.Equal(t, 0, len(du))
+
+ // examine contents of exported tars (requires containerd)
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" {
+ t.Logf("checkAllReleasable: skipping check for exported tars in non-containerd test")
+ return
+ }
+
+ // TODO: make public pull helper function so this can be checked for standalone as well
+
+ client, err := newContainerd(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+ snapshotService := client.SnapshotService("overlayfs")
+
+ retries = 0
+ for {
+ count := 0
+ err = snapshotService.Walk(ctx, func(context.Context, snapshots.Info) error {
+ count++
+ return nil
+ })
+ require.NoError(t, err)
+ if count == 0 {
+ break
+ }
+ require.True(t, 20 > retries)
+ retries++
+ time.Sleep(500 * time.Millisecond)
+ }
+
+ if !checkContent {
+ return
+ }
+
+ retries = 0
+ for {
+ count := 0
+ var infos []content.Info
+ err = client.ContentStore().Walk(ctx, func(info content.Info) error {
+ count++
+ infos = append(infos, info)
+ return nil
+ })
+ require.NoError(t, err)
+ if count == 0 {
+ break
+ }
+ if retries >= 50 {
+ require.FailNowf(t, "content still exists", "%+v", infos)
+ }
+ retries++
+ time.Sleep(500 * time.Millisecond)
+ }
+}
+
+func testInvalidExporter(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ def, err := llb.Image("busybox:latest").Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ target := "example.com/buildkit/testoci:latest"
+ attrs := map[string]string{
+ "name": target,
+ }
+ for _, exp := range []string{ExporterOCI, ExporterDocker} {
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: exp,
+ Attrs: attrs,
+ },
+ },
+ }, nil)
+ // output file writer is required
+ require.Error(t, err)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: exp,
+ Attrs: attrs,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ // output directory is not supported
+ require.Error(t, err)
+ }
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ Attrs: attrs,
+ },
+ },
+ }, nil)
+ // output directory is required
+ require.Error(t, err)
+
+ f, err := os.Create(filepath.Join(destDir, "a"))
+ require.NoError(t, err)
+ defer f.Close()
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ Attrs: attrs,
+ Output: fixedWriteCloser(f),
+ },
+ },
+ }, nil)
+ // output file writer is not supported
+ require.Error(t, err)
+
+ checkAllReleasable(t, c, sb, true)
+}
+
+// moby/buildkit#492
+func testParallelLocalBuilds(t *testing.T, sb integration.Sandbox) {
+ ctx, cancel := context.WithCancel(sb.Context())
+ defer cancel()
+
+ c, err := New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ eg, ctx := errgroup.WithContext(ctx)
+
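+	// run several local-source builds concurrently, each mounting a different
+	// directory under the same "source" name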
+ for i := 0; i < 3; i++ {
+ func(i int) {
+ eg.Go(func() error {
+ fn := fmt.Sprintf("test%d", i)
+ srcDir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile(fn, []byte("contents"), 0600),
+ )
+ require.NoError(t, err)
+
+ def, err := llb.Local("source").Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ _, err = c.Solve(ctx, def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ LocalDirs: map[string]string{
+ "source": srcDir,
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ act, err := os.ReadFile(filepath.Join(destDir, fn))
+ require.NoError(t, err)
+
+ require.Equal(t, "contents", string(act))
+ return nil
+ })
+ }(i)
+ }
+
+ err = eg.Wait()
+ require.NoError(t, err)
+}
+
+// testRelativeMountpoint tests that relative mountpoint paths don't fail when runc is
+// upgraded to at least rc95, which errors when mountpoints are not absolute. Relative
+// paths should be transformed into absolute paths based on the llb.State's current
+// working directory.
+func testRelativeMountpoint(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ id := identity.NewID()
+
+ st := llb.Image("busybox:latest").Dir("/root").Run(
+ llb.Shlexf("sh -c 'echo -n %s > /root/relpath/data'", id),
+ ).AddMount("relpath", llb.Scratch())
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "data"))
+ require.NoError(t, err)
+ require.Equal(t, dt, []byte(id))
+}
+
+// moby/buildkit#2476
+func testBuildInfoExporter(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ st := llb.Image("busybox:latest").Run(
+ llb.Args([]string{"/bin/sh", "-c", `echo hello`}),
+ )
+ def, err := st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
+ return c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ }
+
+ var exports []ExportEntry
+ if integration.IsTestDockerdMoby(sb) {
+ exports = []ExportEntry{{
+ Type: "moby",
+ Attrs: map[string]string{
+ "name": "reg.dummy:5000/buildkit/test:latest",
+ },
+ }}
+ } else {
+ exports = []ExportEntry{{
+ Type: ExporterOCI,
+ Attrs: map[string]string{},
+ Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+ }}
+ }
+
+ res, err := c.Build(sb.Context(), SolveOpt{
+ Exports: exports,
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo)
+ decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo])
+ require.NoError(t, err)
+
+ var exbi binfotypes.BuildInfo
+ err = json.Unmarshal(decbi, &exbi)
+ require.NoError(t, err)
+
+ require.Equal(t, len(exbi.Sources), 1)
+ require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage)
+ require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest")
+}
+
+// moby/buildkit#2476
+func testBuildInfoInline(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ st := llb.Image("busybox:latest").Run(
+ llb.Args([]string{"/bin/sh", "-c", `echo hello`}),
+ )
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" {
+ t.Skip("rest of test requires containerd worker")
+ }
+
+ client, err := newContainerd(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+
+ target := registry + "/buildkit/test-buildinfo:latest"
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
},
- }}
- case "max":
- cacheExports = []CacheOptionsEntry{{
- Type: "registry",
- Attrs: map[string]string{
- "ref": cacheTarget,
- "mode": "max",
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ img, err := client.GetImage(ctx, target)
+ require.NoError(t, err)
+
+ desc, err := img.Config(ctx)
+ require.NoError(t, err)
+
+ dt, err := content.ReadBlob(ctx, img.ContentStore(), desc)
+ require.NoError(t, err)
+
+ var config binfotypes.ImageConfig
+ require.NoError(t, json.Unmarshal(dt, &config))
+
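+	// build info is embedded in the image config as base64-encoded JSON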
+ dec, err := base64.StdEncoding.DecodeString(config.BuildInfo)
+ require.NoError(t, err)
+
+ var bi binfotypes.BuildInfo
+ require.NoError(t, json.Unmarshal(dec, &bi))
+
+ require.Equal(t, len(bi.Sources), 1)
+ require.Equal(t, bi.Sources[0].Type, binfotypes.SourceTypeDockerImage)
+ require.Equal(t, bi.Sources[0].Ref, "docker.io/library/busybox:latest")
+}
+
+func testBuildInfoNoExport(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ st := llb.Image("busybox:latest").Run(
+ llb.Args([]string{"/bin/sh", "-c", `echo hello`}),
+ )
+ def, err := st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
+ return c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ }
+
+ res, err := c.Build(sb.Context(), SolveOpt{}, "", frontend, nil)
+ require.NoError(t, err)
+
+ require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo)
+ decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo])
+ require.NoError(t, err)
+
+ var exbi binfotypes.BuildInfo
+ err = json.Unmarshal(decbi, &exbi)
+ require.NoError(t, err)
+
+ require.Equal(t, len(exbi.Sources), 1)
+ require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage)
+ require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest")
+}
+
+func testPullWithLayerLimit(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ st := llb.Scratch().
+ File(llb.Mkfile("/first", 0644, []byte("first"))).
+ File(llb.Mkfile("/second", 0644, []byte("second"))).
+ File(llb.Mkfile("/third", 0644, []byte("third")))
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ target := registry + "/buildkit/testlayers:latest"
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+	// pull only the first 2 layers
+ st = llb.Image(target, llb.WithLayerLimit(2)).
+ File(llb.Mkfile("/forth", 0644, []byte("forth")))
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{{
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ }},
+ }, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "first"))
+ require.NoError(t, err)
+ require.Equal(t, string(dt), "first")
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "second"))
+ require.NoError(t, err)
+ require.Equal(t, string(dt), "second")
+
+ _, err = os.ReadFile(filepath.Join(destDir, "third"))
+ require.Error(t, err)
+ require.True(t, errors.Is(err, os.ErrNotExist))
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "forth"))
+ require.NoError(t, err)
+ require.Equal(t, string(dt), "forth")
+
+	// pull only the 3rd layer, as the diff between the 2-layer-limited image and the full image
+ st = llb.Diff(
+ llb.Image(target, llb.WithLayerLimit(2)),
+ llb.Image(target)).
+ File(llb.Mkfile("/forth", 0644, []byte("forth")))
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ destDir = t.TempDir()
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{{
+ Type: ExporterLocal,
+ OutputDir: destDir,
+ }},
+ }, nil)
+ require.NoError(t, err)
+
+ _, err = os.ReadFile(filepath.Join(destDir, "first"))
+ require.Error(t, err)
+ require.True(t, errors.Is(err, os.ErrNotExist))
+
+ _, err = os.ReadFile(filepath.Join(destDir, "second"))
+ require.Error(t, err)
+ require.True(t, errors.Is(err, os.ErrNotExist))
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "third"))
+ require.NoError(t, err)
+ require.Equal(t, string(dt), "third")
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "forth"))
+ require.NoError(t, err)
+ require.Equal(t, string(dt), "forth")
+
+	// a layer limit of 0 is invalid and should error cleanly
+ st = llb.Image(target, llb.WithLayerLimit(0))
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "invalid layer limit")
+}
+
+func testCallInfo(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureInfo)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+ _, err = c.Info(sb.Context())
+ require.NoError(t, err)
+}
+
+func testValidateDigestOrigin(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ st := llb.Image("busybox:latest").Run(llb.Shlex("touch foo"), llb.Dir("/wd")).AddMount("/wd", llb.Scratch())
+
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ target := registry + "/buildkit/testdigest:latest"
+
+ resp, err := c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dgst, ok := resp.ExporterResponse[exptypes.ExporterImageDigestKey]
+ require.True(t, ok)
+
+ err = c.Prune(sb.Context(), nil, PruneAll)
+ require.NoError(t, err)
+
+ st = llb.Image(target + "@" + dgst)
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.NoError(t, err)
+
+ // accessing the digest from invalid names should fail
+ st = llb.Image("example.invalid/nosuchrepo@" + dgst)
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.Error(t, err)
+
+	// also check a repo that exists but does not contain the digest
+ st = llb.Image("docker.io/library/ubuntu@" + dgst)
+
+ def, err = st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ _, err = c.Solve(sb.Context(), def, SolveOpt{}, nil)
+ require.Error(t, err)
+}
+
+func testExportAnnotations(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ amd64 := platforms.MustParse("linux/amd64")
+ arm64 := platforms.MustParse("linux/arm64")
+ ps := []ocispecs.Platform{amd64, arm64}
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+ expPlatforms := &exptypes.Platforms{
+ Platforms: make([]exptypes.Platform, len(ps)),
+ }
+ for i, p := range ps {
+ st := llb.Scratch().File(
+ llb.Mkfile("platform", 0600, []byte(platforms.Format(p))),
+ )
+
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+
+ k := platforms.Format(p)
+ res.AddRef(k, ref)
+
+ expPlatforms.Platforms[i] = exptypes.Platform{
+ ID: k,
+ Platform: p,
+ }
+ }
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
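+		// set annotations at every supported level: the index, the index descriptor,
+		// per-platform manifests, and their descriptors; a bare key ("gd") defaults to
+		// the manifest level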
+ res.AddMeta(exptypes.AnnotationIndexKey("gi"), []byte("generic index"))
+ res.AddMeta(exptypes.AnnotationIndexDescriptorKey("gid"), []byte("generic index descriptor"))
+ res.AddMeta(exptypes.AnnotationManifestKey(nil, "gm"), []byte("generic manifest"))
+ res.AddMeta(exptypes.AnnotationManifestDescriptorKey(nil, "gmd"), []byte("generic manifest descriptor"))
+ res.AddMeta(exptypes.AnnotationManifestKey(&amd64, "m"), []byte("amd64 manifest"))
+ res.AddMeta(exptypes.AnnotationManifestKey(&arm64, "m"), []byte("arm64 manifest"))
+ res.AddMeta(exptypes.AnnotationManifestDescriptorKey(&amd64, "md"), []byte("amd64 manifest descriptor"))
+ res.AddMeta(exptypes.AnnotationManifestDescriptorKey(&arm64, "md"), []byte("arm64 manifest descriptor"))
+ res.AddMeta(exptypes.AnnotationKey{Key: "gd"}.String(), []byte("generic default"))
+
+ return res, nil
+ }
+
+ // testing for image exporter
+
+ target := registry + "/buildkit/testannotations:latest"
+
+ const created = "2022-01-23T12:34:56Z"
+
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ "annotation-index.gio": "generic index opt",
+ "annotation-index." + ocispecs.AnnotationCreated: created,
+ "annotation-manifest.gmo": "generic manifest opt",
+ "annotation-manifest-descriptor.gmdo": "generic manifest descriptor opt",
+ "annotation-manifest[linux/amd64].mo": "amd64 manifest opt",
+ "annotation-manifest-descriptor[linux/amd64].mdo": "amd64 manifest descriptor opt",
+ "annotation-manifest[linux/arm64].mo": "arm64 manifest opt",
+ "annotation-manifest-descriptor[linux/arm64].mdo": "arm64 manifest descriptor opt",
+ },
},
- }}
- cacheImports = []CacheOptionsEntry{{
- Type: "registry",
- Attrs: map[string]string{
- "ref": cacheTarget,
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
+
+ require.Equal(t, "generic index", imgs.Index.Annotations["gi"])
+ require.Equal(t, "generic index opt", imgs.Index.Annotations["gio"])
+ require.Equal(t, created, imgs.Index.Annotations[ocispecs.AnnotationCreated])
+ for _, desc := range imgs.Index.Manifests {
+ require.Equal(t, "generic manifest descriptor", desc.Annotations["gmd"])
+ require.Equal(t, "generic manifest descriptor opt", desc.Annotations["gmdo"])
+ switch {
+ case platforms.Only(amd64).Match(*desc.Platform):
+ require.Equal(t, "amd64 manifest descriptor", desc.Annotations["md"])
+ require.Equal(t, "amd64 manifest descriptor opt", desc.Annotations["mdo"])
+ case platforms.Only(arm64).Match(*desc.Platform):
+ require.Equal(t, "arm64 manifest descriptor", desc.Annotations["md"])
+ require.Equal(t, "arm64 manifest descriptor opt", desc.Annotations["mdo"])
+ default:
+ require.Fail(t, "unrecognized platform")
+ }
+ }
+
+ amdImage := imgs.Find(platforms.Format(amd64))
+ require.Equal(t, "generic default", amdImage.Manifest.Annotations["gd"])
+ require.Equal(t, "generic manifest", amdImage.Manifest.Annotations["gm"])
+ require.Equal(t, "generic manifest opt", amdImage.Manifest.Annotations["gmo"])
+ require.Equal(t, "amd64 manifest", amdImage.Manifest.Annotations["m"])
+ require.Equal(t, "amd64 manifest opt", amdImage.Manifest.Annotations["mo"])
+
+ armImage := imgs.Find(platforms.Format(arm64))
+ require.Equal(t, "generic default", armImage.Manifest.Annotations["gd"])
+ require.Equal(t, "generic manifest", armImage.Manifest.Annotations["gm"])
+ require.Equal(t, "generic manifest opt", armImage.Manifest.Annotations["gmo"])
+ require.Equal(t, "arm64 manifest", armImage.Manifest.Annotations["m"])
+ require.Equal(t, "arm64 manifest opt", armImage.Manifest.Annotations["mo"])
+
+ // testing for oci exporter
+
+ destDir := t.TempDir()
+
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterOCI,
+ Output: fixedWriteCloser(outW),
+ Attrs: map[string]string{
+ "annotation-index.gio": "generic index opt",
+ "annotation-index-descriptor.gido": "generic index descriptor opt",
+ "annotation-index-descriptor." + ocispecs.AnnotationCreated: created,
+ "annotation-manifest.gmo": "generic manifest opt",
+ "annotation-manifest-descriptor.gmdo": "generic manifest descriptor opt",
+ "annotation-manifest[linux/amd64].mo": "amd64 manifest opt",
+ "annotation-manifest-descriptor[linux/amd64].mdo": "amd64 manifest descriptor opt",
+ "annotation-manifest[linux/arm64].mo": "arm64 manifest opt",
+ "annotation-manifest-descriptor[linux/arm64].mdo": "arm64 manifest descriptor opt",
+ },
},
- }}
- default:
- require.Fail(t, "unknown cache mode: %s", mode)
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(out)
+ require.NoError(t, err)
+
+ m, err := testutil.ReadTarToMap(dt, false)
+ require.NoError(t, err)
+
+	var layout ocispecs.Index
+	err = json.Unmarshal(m["index.json"].Data, &layout)
+	require.NoError(t, err)
+	require.Equal(t, "generic index descriptor", layout.Manifests[0].Annotations["gid"])
+	require.Equal(t, "generic index descriptor opt", layout.Manifests[0].Annotations["gido"])
+	require.Equal(t, created, layout.Manifests[0].Annotations[ocispecs.AnnotationCreated])
+
+	var index ocispecs.Index
+	err = json.Unmarshal(m["blobs/sha256/"+layout.Manifests[0].Digest.Hex()].Data, &index)
+	require.NoError(t, err)
+	require.Equal(t, "generic index", index.Annotations["gi"])
+	require.Equal(t, "generic index opt", index.Annotations["gio"])
+
+ for _, desc := range index.Manifests {
+ var mfst ocispecs.Manifest
+ err = json.Unmarshal(m["blobs/sha256/"+desc.Digest.Hex()].Data, &mfst)
+ require.NoError(t, err)
+
+ require.Equal(t, "generic default", mfst.Annotations["gd"])
+ require.Equal(t, "generic manifest", mfst.Annotations["gm"])
+ require.Equal(t, "generic manifest descriptor", desc.Annotations["gmd"])
+ require.Equal(t, "generic manifest opt", mfst.Annotations["gmo"])
+ require.Equal(t, "generic manifest descriptor opt", desc.Annotations["gmdo"])
+
+ switch {
+ case platforms.Only(amd64).Match(*desc.Platform):
+ require.Equal(t, "amd64 manifest", mfst.Annotations["m"])
+ require.Equal(t, "amd64 manifest descriptor", desc.Annotations["md"])
+ require.Equal(t, "amd64 manifest opt", mfst.Annotations["mo"])
+ require.Equal(t, "amd64 manifest descriptor opt", desc.Annotations["mdo"])
+ case platforms.Only(arm64).Match(*desc.Platform):
+ require.Equal(t, "arm64 manifest", mfst.Annotations["m"])
+ require.Equal(t, "arm64 manifest descriptor", desc.Annotations["md"])
+ require.Equal(t, "arm64 manifest opt", mfst.Annotations["mo"])
+ require.Equal(t, "arm64 manifest descriptor opt", desc.Annotations["mdo"])
+ default:
+ require.Fail(t, "unrecognized platform")
+ }
+ }
+}
+
+func testExportAnnotationsMediaTypes(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
}
+ require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
+ p := platforms.DefaultSpec()
+ ps := []ocispecs.Platform{p}
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+ expPlatforms := &exptypes.Platforms{
+ Platforms: make([]exptypes.Platform, len(ps)),
+ }
+ for i, p := range ps {
+ st := llb.Scratch().File(
+ llb.Mkfile("platform", 0600, []byte(platforms.Format(p))),
+ )
+
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+
+ k := platforms.Format(p)
+ res.AddRef(k, ref)
+
+ expPlatforms.Platforms[i] = exptypes.Platform{
+ ID: k,
+ Platform: p,
+ }
+ }
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ return res, nil
+ }
+
+ target := registry + "/buildkit/testannotationsmedia:1"
+ _, err = c.Build(sb.Context(), SolveOpt{
Exports: []ExportEntry{
{
Type: ExporterImage,
Attrs: map[string]string{
- "name": target,
- "push": "true",
+ "name": target,
+ "push": "true",
+ "annotation-manifest.a": "b",
+ },
+ },
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(imgs.Images))
+
+ target2 := registry + "/buildkit/testannotationsmedia:2"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target2,
+ "push": "true",
+ "annotation-index.c": "d",
+ },
+ },
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ desc, provider, err = contentutil.ProviderFromRef(target2)
+ require.NoError(t, err)
+ imgs2, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(imgs2.Images))
+
+ require.Equal(t, "b", imgs.Images[0].Manifest.Annotations["a"])
+ require.Equal(t, "d", imgs2.Index.Annotations["c"])
+
+ require.Equal(t, images.MediaTypeDockerSchema2ManifestList, imgs.Index.MediaType)
+ require.Equal(t, ocispecs.MediaTypeImageIndex, imgs2.Index.MediaType)
+}
+
+func testExportAttestations(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ ps := []ocispecs.Platform{
+ platforms.MustParse("linux/amd64"),
+ platforms.MustParse("linux/arm64"),
+ }
+
+ success := []byte(`{"success": true}`)
+ successDigest := digest.SHA256.FromBytes(success)
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+ expPlatforms := &exptypes.Platforms{}
+
+ for _, p := range ps {
+ pk := platforms.Format(p)
+ expPlatforms.Platforms = append(expPlatforms.Platforms, exptypes.Platform{ID: pk, Platform: p})
+
+ // build image
+ st := llb.Scratch().File(
+ llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))),
+ )
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(pk, ref)
+
+ // build attestations
+ st = llb.Scratch().
+ File(llb.Mkfile("/attestation.json", 0600, success)).
+ File(llb.Mkfile("/attestation2.json", 0600, []byte{}))
+ def, err = st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err = c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ refAttest, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddAttestation(pk, gateway.Attestation{
+ Kind: gatewaypb.AttestationKindInToto,
+ Ref: refAttest,
+ Path: "/attestation.json",
+ InToto: result.InTotoAttestation{
+ PredicateType: "https://example.com/attestations/v1.0",
+ Subjects: []result.InTotoSubject{{
+ Kind: gatewaypb.InTotoSubjectKindSelf,
+ }},
+ },
+ })
+ res.AddAttestation(pk, gateway.Attestation{
+ Kind: gatewaypb.AttestationKindInToto,
+ Ref: refAttest,
+ Path: "/attestation2.json",
+ InToto: result.InTotoAttestation{
+ PredicateType: "https://example.com/attestations2/v1.0",
+ Subjects: []result.InTotoSubject{{
+ Kind: gatewaypb.InTotoSubjectKindRaw,
+ Name: "/attestation.json",
+ Digest: []digest.Digest{successDigest},
+ }},
+ },
+ })
+ }
+
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ return res, nil
+ }
+
+ t.Run("image", func(t *testing.T) {
+ targets := []string{
+ registry + "/buildkit/testattestationsfoo:latest",
+ registry + "/buildkit/testattestationsbar:latest",
+ }
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": strings.Join(targets, ","),
+ "push": "true",
+ },
+ },
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(targets[0])
+ require.NoError(t, err)
+
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, len(ps)*2, len(imgs.Images))
+
+ var bases []*testutil.ImageInfo
+ for _, p := range ps {
+ pk := platforms.Format(p)
+ img := imgs.Find(pk)
+ require.NotNil(t, img)
+ require.Equal(t, pk, platforms.Format(*img.Desc.Platform))
+ require.Equal(t, 1, len(img.Layers))
+ require.Equal(t, []byte(fmt.Sprintf("hello %s!", pk)), img.Layers[0]["greeting"].Data)
+ bases = append(bases, img)
+ }
+
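+		// attestation manifests are attached as extra "unknown/unknown" platform
+		// entries that reference their subject image via annotations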
+ atts := imgs.Filter("unknown/unknown")
+ require.Equal(t, len(ps), len(atts.Images))
+ for i, att := range atts.Images {
+ require.Equal(t, ocispecs.MediaTypeImageManifest, att.Desc.MediaType)
+ require.Equal(t, "unknown/unknown", platforms.Format(*att.Desc.Platform))
+ require.Equal(t, "unknown/unknown", att.Img.OS+"/"+att.Img.Architecture)
+ require.Equal(t, attestation.DockerAnnotationReferenceTypeDefault, att.Desc.Annotations[attestation.DockerAnnotationReferenceType])
+ require.Equal(t, bases[i].Desc.Digest.String(), att.Desc.Annotations[attestation.DockerAnnotationReferenceDigest])
+ require.Equal(t, 2, len(att.Layers))
+ require.Equal(t, len(att.Layers), len(att.Img.RootFS.DiffIDs))
+ require.Equal(t, len(att.Img.History), 0)
+
+ var attest intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+
+ purls := map[string]string{}
+ for _, k := range targets {
+ p, _ := purl.RefToPURL(k, &ps[i])
+ purls[k] = p
+ }
+
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
+ require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate)
+ subjects := []intoto.Subject{
+ {
+ Name: purls[targets[0]],
+ Digest: map[string]string{
+ "sha256": bases[i].Desc.Digest.Encoded(),
+ },
+ },
+ {
+ Name: purls[targets[1]],
+ Digest: map[string]string{
+ "sha256": bases[i].Desc.Digest.Encoded(),
+ },
+ },
+ }
+ require.Equal(t, subjects, attest.Subject)
+
+ var attest2 intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[1], &attest2))
+
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type)
+ require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType)
+ require.Nil(t, attest2.Predicate)
+ subjects = []intoto.Subject{{
+ Name: "/attestation.json",
+ Digest: map[string]string{
+ "sha256": successDigest.Encoded(),
+ },
+ }}
+ require.Equal(t, subjects, attest2.Subject)
+ }
+
+ cdAddress := sb.ContainerdAddress()
+ if cdAddress == "" {
+ return
+ }
+ client, err := containerd.New(cdAddress)
+ require.NoError(t, err)
+ defer client.Close()
+ ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
+
+ for _, target := range targets {
+ err = client.ImageService().Delete(ctx, target, images.SynchronousDelete())
+ require.NoError(t, err)
+ }
+ checkAllReleasable(t, c, sb, true)
+ })
+
+ t.Run("local", func(t *testing.T) {
+ dir := t.TempDir()
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterLocal,
+ OutputDir: dir,
+ Attrs: map[string]string{
+ "attestation-prefix": "test.",
+ },
+ },
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ for _, p := range ps {
+ var attest intoto.Statement
+ dt, err := os.ReadFile(path.Join(dir, strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation.json"))
+ require.NoError(t, err)
+ require.NoError(t, json.Unmarshal(dt, &attest))
+
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
+ require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate)
+
+ require.Equal(t, []intoto.Subject{{
+ Name: "greeting",
+ Digest: result.ToDigestMap(digest.Canonical.FromString("hello " + platforms.Format(p) + "!")),
+ }}, attest.Subject)
+
+ var attest2 intoto.Statement
+ dt, err = os.ReadFile(path.Join(dir, strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation2.json"))
+ require.NoError(t, err)
+ require.NoError(t, json.Unmarshal(dt, &attest2))
+
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type)
+ require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType)
+ require.Nil(t, attest2.Predicate)
+ subjects := []intoto.Subject{{
+ Name: "/attestation.json",
+ Digest: map[string]string{
+ "sha256": successDigest.Encoded(),
+ },
+ }}
+ require.Equal(t, subjects, attest2.Subject)
+ }
+ })
+
+ t.Run("tar", func(t *testing.T) {
+ dir := t.TempDir()
+ out := filepath.Join(dir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterTar,
+ Output: fixedWriteCloser(outW),
+ Attrs: map[string]string{
+ "attestation-prefix": "test.",
+ },
},
},
- },
- CacheExports: cacheExports,
- }, nil)
- require.NoError(t, err)
+ }, "", frontend, nil)
+ require.NoError(t, err)
- // verify that the busybox image stayed lazy
- for _, layer := range busyboxManifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
- }
+ dt, err := os.ReadFile(out)
+ require.NoError(t, err)
- // get the random value at /bar/2
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ m, err := testutil.ReadTarToMap(dt, false)
+ require.NoError(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- },
- },
- }, nil)
- require.NoError(t, err)
+ for _, p := range ps {
+ var attest intoto.Statement
+ dt := m[path.Join(strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation.json")].Data
+ require.NoError(t, json.Unmarshal(dt, &attest))
+
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
+ require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate)
+
+ require.Equal(t, []intoto.Subject{{
+ Name: "greeting",
+ Digest: result.ToDigestMap(digest.Canonical.FromString("hello " + platforms.Format(p) + "!")),
+ }}, attest.Subject)
+
+ var attest2 intoto.Statement
+ dt = m[path.Join(strings.ReplaceAll(platforms.Format(p), "/", "_"), "test.attestation2.json")].Data
+ require.NoError(t, json.Unmarshal(dt, &attest2))
+
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest2.Type)
+ require.Equal(t, "https://example.com/attestations2/v1.0", attest2.PredicateType)
+ require.Nil(t, attest2.Predicate)
+ subjects := []intoto.Subject{{
+ Name: "/attestation.json",
+ Digest: map[string]string{
+ "sha256": successDigest.Encoded(),
+ },
+ }}
+ require.Equal(t, subjects, attest2.Subject)
+ }
+ })
+}
- bar2Contents, err := ioutil.ReadFile(filepath.Join(destDir, "bar", "2"))
+func testAttestationDefaultSubject(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
+ defer c.Close()
- // clear all local state out
- img, err := imageService.Get(ctx, target)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- manifest, err := images.Manifest(ctx, contentStore, img.Target, nil)
- require.NoError(t, err)
+ ps := []ocispecs.Platform{
+ platforms.MustParse("linux/amd64"),
+ }
- err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
- require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+ success := []byte(`{"success": true}`)
- for _, layer := range manifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+ expPlatforms := &exptypes.Platforms{}
+
+ for _, p := range ps {
+ pk := platforms.Format(p)
+ expPlatforms.Platforms = append(expPlatforms.Platforms, exptypes.Platform{ID: pk, Platform: p})
+
+ // build image
+ st := llb.Scratch().File(
+ llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))),
+ )
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(pk, ref)
+
+ // build attestations
+ st = llb.Scratch().File(llb.Mkfile("/attestation.json", 0600, success))
+ def, err = st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err = c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ refAttest, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddAttestation(pk, gateway.Attestation{
+ Kind: gatewaypb.AttestationKindInToto,
+ Ref: refAttest,
+ Path: "/attestation.json",
+ InToto: result.InTotoAttestation{
+ PredicateType: "https://example.com/attestations/v1.0",
+ },
+ })
+ }
+
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ return res, nil
}
- // re-run the same build with cache imports and verify everything stays lazy
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
+ target := registry + "/buildkit/testattestationsemptysubject:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
},
- }},
- CacheImports: cacheImports,
- CacheExports: cacheExports,
- }, nil)
+ },
+ }, "", frontend, nil)
require.NoError(t, err)
- // verify everything from before stayed lazy
- img, err = imageService.Get(ctx, target)
+ desc, provider, err := contentutil.ProviderFromRef(target)
require.NoError(t, err)
- manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)
+ require.Equal(t, len(ps)*2, len(imgs.Images))
- for i, layer := range manifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i)
+ var bases []*testutil.ImageInfo
+ for _, p := range ps {
+ pk := platforms.Format(p)
+ bases = append(bases, imgs.Find(pk))
}
- // re-run the build with a change only to input1 using the remote cache
- input1 = llb.Scratch().
- File(llb.Mkdir("/dir", 0777)).
- File(llb.Mkfile("/dir/1", 0444, nil))
- input1Copy = llb.Scratch().File(llb.Copy(input1, "/dir/1", "/foo/1", &llb.CopyInfo{CreateDestPath: true}))
+ atts := imgs.Filter("unknown/unknown")
+ require.Equal(t, len(ps), len(atts.Images))
+ for i, att := range atts.Images {
+ var attest intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
- merge = llb.Merge([]llb.State{llb.Image(busyboxTarget), input1Copy, input2Copy})
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
+ require.Equal(t, map[string]interface{}{"success": true}, attest.Predicate)
- def, err = merge.Marshal(sb.Context())
- require.NoError(t, err)
+ name, _ := purl.RefToPURL(target, &ps[0])
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
+ subjects := []intoto.Subject{{
+ Name: name,
+ Digest: map[string]string{
+ "sha256": bases[i].Desc.Digest.Encoded(),
},
- }},
- CacheExports: cacheExports,
- CacheImports: cacheImports,
- }, nil)
- require.NoError(t, err)
+ }}
+ require.Equal(t, subjects, attest.Subject)
+ }
+}
- // verify everything from before stayed lazy except the middle layer for input1Copy
- img, err = imageService.Get(ctx, target)
+func testAttestationBundle(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
+ defer c.Close()
- manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- for i, layer := range manifest.Layers {
- switch i {
- case 0, 2:
- // bottom and top layer should stay lazy as they didn't change
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i)
- case 1:
- // middle layer had to be rebuilt, should exist locally
- _, err = contentStore.Info(ctx, layer.Digest)
- require.NoError(t, err)
- default:
- require.Fail(t, "unexpected layer index %d", i)
- }
+ ps := []ocispecs.Platform{
+ platforms.MustParse("linux/amd64"),
}
- // check the random value at /bar/2 didn't change
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+ expPlatforms := &exptypes.Platforms{}
+
+ for _, p := range ps {
+ pk := platforms.Format(p)
+ expPlatforms.Platforms = append(expPlatforms.Platforms, exptypes.Platform{ID: pk, Platform: p})
+
+ // build image
+ st := llb.Scratch().File(
+ llb.Mkfile("/greeting", 0600, []byte(fmt.Sprintf("hello %s!", pk))),
+ )
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(pk, ref)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
+ stmt := intoto.Statement{
+ StatementHeader: intoto.StatementHeader{
+ Type: intoto.StatementInTotoV01,
+ PredicateType: "https://example.com/attestations/v1.0",
+ },
+ Predicate: map[string]interface{}{
+ "foo": "1",
+ },
+ }
+ buff := bytes.NewBuffer(nil)
+ enc := json.NewEncoder(buff)
+ require.NoError(t, enc.Encode(stmt))
+
+ // build attestations
+ st = llb.Scratch()
+ st = st.File(
+ llb.Mkdir("/bundle", 0700),
+ )
+ st = st.File(
+ llb.Mkfile("/bundle/attestation.json", 0600, buff.Bytes()),
+ )
+ def, err = st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err = c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ refAttest, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
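+			// bundle attestations reference a directory of fully-encoded in-toto
+			// statements rather than a single raw attestation file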
+ res.AddAttestation(pk, gateway.Attestation{
+ Kind: gatewaypb.AttestationKindBundle,
+ Ref: refAttest,
+ Path: "/bundle",
+ })
+ }
+
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ return res, nil
+ }
+
+ target := registry + "/buildkit/testattestationsbundle:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDir,
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
},
},
- CacheImports: cacheImports,
- }, nil)
+ }, "", frontend, nil)
require.NoError(t, err)
- newBar2Contents, err := ioutil.ReadFile(filepath.Join(destDir, "bar", "2"))
+ desc, provider, err := contentutil.ProviderFromRef(target)
require.NoError(t, err)
- require.Equalf(t, bar2Contents, newBar2Contents, "bar/2 contents changed")
-
- // Now test the case with a layer on top of a merge.
- err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+ require.Equal(t, len(ps)*2, len(imgs.Images))
- for _, layer := range manifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ var bases []*testutil.ImageInfo
+ for _, p := range ps {
+ pk := platforms.Format(p)
+ bases = append(bases, imgs.Find(pk))
}
- mergePlusLayer := merge.File(llb.Mkfile("/3", 0444, nil))
-
- def, err = mergePlusLayer.Marshal(sb.Context())
- require.NoError(t, err)
+ atts := imgs.Filter("unknown/unknown")
+ require.Equal(t, len(ps)*1, len(atts.Images))
+ for i, att := range atts.Images {
+ require.Equal(t, 1, len(att.LayersRaw))
+ var attest intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
+ require.Equal(t, "https://example.com/attestations/v1.0", attest.PredicateType)
+ require.Equal(t, map[string]interface{}{"foo": "1"}, attest.Predicate)
+ name, _ := purl.RefToPURL(target, &ps[i])
+ subjects := []intoto.Subject{{
+ Name: name,
+ Digest: map[string]string{
+ "sha256": bases[i].Desc.Digest.Encoded(),
},
- }},
- CacheExports: cacheExports,
- CacheImports: cacheImports,
- }, nil)
- require.NoError(t, err)
+ }}
+ require.Equal(t, subjects, attest.Subject)
+ }
+}
- // check the random value at /bar/2 didn't change
- destDir, err = ioutil.TempDir("", "buildkit")
+func testSBOMScan(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ defer c.Close()
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- },
- },
- CacheImports: cacheImports,
- }, nil)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- newBar2Contents, err = ioutil.ReadFile(filepath.Join(destDir, "bar", "2"))
- require.NoError(t, err)
+ p := platforms.MustParse("linux/amd64")
+ pk := platforms.Format(p)
- require.Equalf(t, bar2Contents, newBar2Contents, "bar/2 contents changed")
+ scannerFrontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
- // clear local state, repeat the build, verify everything stays lazy
- err = imageService.Delete(ctx, img.Name, images.SynchronousDelete())
- require.NoError(t, err)
- checkAllReleasable(t, c, sb, true)
+ st := llb.Image("busybox")
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
+
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(pk, ref)
+
+ expPlatforms := &exptypes.Platforms{
+ Platforms: []exptypes.Platform{{ID: pk, Platform: p}},
+ }
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ var img ocispecs.Image
+ cmd := `
+cat <<EOF > $BUILDKIT_SCAN_DESTINATION/spdx.json
+{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://spdx.dev/Document",
+ "predicate": {"name": "fallback"}
+}
+EOF
+`
+ img.Config.Cmd = []string{"/bin/sh", "-c", cmd}
+ config, err := json.Marshal(img)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to marshal image config")
+ }
+ res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, pk), config)
- for _, layer := range manifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v", err)
+ return res, nil
}
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
+ scannerTarget := registry + "/buildkit/testsbomscanner:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": scannerTarget,
+ "push": "true",
+ },
},
- }},
- CacheImports: cacheImports,
- CacheExports: cacheExports,
- }, nil)
- require.NoError(t, err)
-
- img, err = imageService.Get(ctx, target)
+ },
+ }, "", scannerFrontend, nil)
require.NoError(t, err)
- manifest, err = images.Manifest(ctx, contentStore, img.Target, nil)
- require.NoError(t, err)
+ makeTargetFrontend := func(attest bool) func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ return func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
- for i, layer := range manifest.Layers {
- _, err = contentStore.Info(ctx, layer.Digest)
- require.ErrorIs(t, err, ctderrdefs.ErrNotFound, "unexpected error %v for index %d", err, i)
- }
-}
+ // build image
+ st := llb.Scratch().File(
+ llb.Mkfile("/greeting", 0600, []byte("hello world!")),
+ )
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(pk, ref)
-func requireContents(ctx context.Context, t *testing.T, c *Client, sb integration.Sandbox, state llb.State, cacheImports, cacheExports []CacheOptionsEntry, imageTarget string, files ...fstest.Applier) {
- t.Helper()
+ expPlatforms := &exptypes.Platforms{
+ Platforms: []exptypes.Platform{{ID: pk, Platform: p}},
+ }
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ // build attestations
+ if attest {
+ st = llb.Scratch().
+ File(llb.Mkfile("/result.spdx", 0600, []byte(`{"name": "frontend"}`)))
+ def, err = st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err = c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ refAttest, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
- def, err := state.Marshal(ctx)
- require.NoError(t, err)
+ res.AddAttestation(pk, gateway.Attestation{
+ Kind: gatewaypb.AttestationKindInToto,
+ Ref: refAttest,
+ Path: "/result.spdx",
+ InToto: result.InTotoAttestation{
+ PredicateType: intoto.PredicateSPDX,
+ },
+ })
+ }
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ return res, nil
+ }
+ }
- _, err = c.Solve(ctx, def, SolveOpt{
+ // test the default fallback scanner
+ target := registry + "/buildkit/testsbom:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ FrontendAttrs: map[string]string{
+ "attest:sbom": "",
+ },
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDir,
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
},
},
- CacheImports: cacheImports,
- CacheExports: cacheExports,
- }, nil)
+ }, "", makeTargetFrontend(false), nil)
require.NoError(t, err)
- require.NoError(t, fstest.CheckDirectoryEqualWithApplier(destDir, fstest.Apply(files...)))
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
- if imageTarget != "" {
- var exports []ExportEntry
- if os.Getenv("TEST_DOCKERD") == "1" {
- exports = []ExportEntry{{
- Type: "moby",
- Attrs: map[string]string{
- "name": imageTarget,
- },
- }}
- } else {
- exports = []ExportEntry{{
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
+
+ // test the frontend builtin scanner
+ target = registry + "/buildkit/testsbom2:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ FrontendAttrs: map[string]string{
+ "attest:sbom": "",
+ },
+ Exports: []ExportEntry{
+ {
Type: ExporterImage,
Attrs: map[string]string{
- "name": imageTarget,
+ "name": target,
"push": "true",
},
- }}
- }
-
- _, err = c.Solve(ctx, def, SolveOpt{Exports: exports, CacheImports: cacheImports, CacheExports: cacheExports}, nil)
- require.NoError(t, err)
- resetState(t, c, sb)
- requireContents(ctx, t, c, sb, llb.Image(imageTarget, llb.ResolveModePreferLocal), cacheImports, nil, "", files...)
- }
-}
-
-func requireEqualContents(ctx context.Context, t *testing.T, c *Client, stateA, stateB llb.State) {
- t.Helper()
+ },
+ },
+ }, "", makeTargetFrontend(true), nil)
+ require.NoError(t, err)
- defA, err := stateA.Marshal(ctx)
+ desc, provider, err = contentutil.ProviderFromRef(target)
require.NoError(t, err)
- destDirA, err := ioutil.TempDir("", "buildkit")
+ imgs, err = testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)
- defer os.RemoveAll(destDirA)
+ require.Equal(t, 2, len(imgs.Images))
- _, err = c.Solve(ctx, defA, SolveOpt{
+ att := imgs.Find("unknown/unknown")
+ attest := intoto.Statement{}
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
+ require.Subset(t, attest.Predicate, map[string]interface{}{"name": "frontend"})
+
+ // test the specified fallback scanner
+ target = registry + "/buildkit/testsbom3:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ FrontendAttrs: map[string]string{
+ "attest:sbom": "generator=" + scannerTarget,
+ },
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDirA,
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
},
},
- }, nil)
+ }, "", makeTargetFrontend(false), nil)
require.NoError(t, err)
- defB, err := stateB.Marshal(ctx)
+ desc, provider, err = contentutil.ProviderFromRef(target)
require.NoError(t, err)
- destDirB, err := ioutil.TempDir("", "buildkit")
+ imgs, err = testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)
- defer os.RemoveAll(destDirB)
+ require.Equal(t, 2, len(imgs.Images))
- _, err = c.Solve(ctx, defB, SolveOpt{
+ att = imgs.Find("unknown/unknown")
+ attest = intoto.Statement{}
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
+ require.Subset(t, attest.Predicate, map[string]interface{}{"name": "fallback"})
+
+ // test the builtin frontend scanner and the specified fallback scanner together
+ target = registry + "/buildkit/testsbom3:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ FrontendAttrs: map[string]string{
+ "attest:sbom": "generator=" + scannerTarget,
+ },
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- OutputDir: destDirB,
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
},
},
- }, nil)
+ }, "", makeTargetFrontend(true), nil)
require.NoError(t, err)
- require.NoError(t, fstest.CheckDirectoryEqual(destDirA, destDirB))
-}
+ desc, provider, err = contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
-func runShellExecState(base llb.State, cmds ...string) llb.ExecState {
- return base.Run(llb.Args([]string{"sh", "-c", strings.Join(cmds, " && ")}))
-}
+ imgs, err = testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
-func runShell(base llb.State, cmds ...string) llb.State {
- return runShellExecState(base, cmds...).Root()
+ att = imgs.Find("unknown/unknown")
+ attest = intoto.Statement{}
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
+ require.Subset(t, attest.Predicate, map[string]interface{}{"name": "frontend"})
}
-func chainRunShells(base llb.State, cmdss ...[]string) llb.State {
- for _, cmds := range cmdss {
- base = runShell(base, cmds...)
- }
- return base
-}
+func testSBOMScanSingleRef(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM)
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
-func requiresLinux(t *testing.T) {
- if runtime.GOOS != "linux" {
- t.Skipf("unsupported GOOS: %s", runtime.GOOS)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
}
-}
+ require.NoError(t, err)
-// ensurePruneAll tries to ensure Prune completes with retries.
-// Current cache implementation defers release-related logic using goroutine so
-// there can be situation where a build has finished but the following prune doesn't
-// cleanup cache because some records still haven't been released.
-// This function tries to ensure prune by retrying it.
-func ensurePruneAll(t *testing.T, c *Client, sb integration.Sandbox) {
- for i := 0; i < 2; i++ {
- require.NoError(t, c.Prune(sb.Context(), nil, PruneAll))
- for j := 0; j < 20; j++ {
- du, err := c.DiskUsage(sb.Context())
- require.NoError(t, err)
- if len(du) == 0 {
- return
- }
- time.Sleep(500 * time.Millisecond)
- }
- t.Logf("retrying prune(%d)", i)
- }
- t.Fatalf("failed to ensure prune")
-}
+ p := platforms.DefaultSpec()
+ pk := platforms.Format(p)
-func checkAllReleasable(t *testing.T, c *Client, sb integration.Sandbox, checkContent bool) {
- retries := 0
-loop0:
- for {
- require.True(t, 20 > retries)
- retries++
- du, err := c.DiskUsage(sb.Context())
+ scannerFrontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+
+ st := llb.Image("busybox")
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- for _, d := range du {
- if d.InUse {
- time.Sleep(500 * time.Millisecond)
- continue loop0
- }
- }
- break
- }
- err := c.Prune(sb.Context(), nil, PruneAll)
- require.NoError(t, err)
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(pk, ref)
- du, err := c.DiskUsage(sb.Context())
- require.NoError(t, err)
- require.Equal(t, 0, len(du))
+ expPlatforms := &exptypes.Platforms{
+ Platforms: []exptypes.Platform{{ID: pk, Platform: p}},
+ }
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ var img ocispecs.Image
+ cmd := `
+cat <<EOF > $BUILDKIT_SCAN_DESTINATION/spdx.json
+{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://spdx.dev/Document",
+ "predicate": {"name": "fallback"}
+}
+EOF
+`
+ img.Config.Cmd = []string{"/bin/sh", "-c", cmd}
+ config, err := json.Marshal(img)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to marshal image config")
+ }
+ res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, pk), config)
- // examine contents of exported tars (requires containerd)
- cdAddress := sb.ContainerdAddress()
- if cdAddress == "" {
- t.Logf("checkAllReleasable: skipping check for exported tars in non-containerd test")
- return
+ return res, nil
}
- // TODO: make public pull helper function so this can be checked for standalone as well
-
- client, err := newContainerd(cdAddress)
+ scannerTarget := registry + "/buildkit/testsbomscanner:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": scannerTarget,
+ "push": "true",
+ },
+ },
+ },
+ }, "", scannerFrontend, nil)
require.NoError(t, err)
- defer client.Close()
- ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
- snapshotService := client.SnapshotService("overlayfs")
+ targetFrontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
- retries = 0
- for {
- count := 0
- err = snapshotService.Walk(ctx, func(context.Context, snapshots.Info) error {
- count++
- return nil
+ // build image
+ st := llb.Scratch().File(
+ llb.Mkfile("/greeting", 0600, []byte("hello world!")),
+ )
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
})
- require.NoError(t, err)
- if count == 0 {
- break
+ if err != nil {
+ return nil, err
+ }
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
}
- require.True(t, 20 > retries)
- retries++
- time.Sleep(500 * time.Millisecond)
- }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.SetRef(ref)
- if !checkContent {
- return
- }
+ var img ocispecs.Image
+ img.Config.Cmd = []string{"/bin/sh", "-c", "cat /greeting"}
+ config, err := json.Marshal(img)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to marshal image config")
+ }
+ res.AddMeta(exptypes.ExporterImageConfigKey, config)
- retries = 0
- for {
- count := 0
- var infos []content.Info
- err = client.ContentStore().Walk(ctx, func(info content.Info) error {
- count++
- infos = append(infos, info)
- return nil
- })
- require.NoError(t, err)
- if count == 0 {
- break
+ expPlatforms := &exptypes.Platforms{
+ Platforms: []exptypes.Platform{{ID: pk, Platform: p}},
}
- if retries >= 20 {
- require.FailNowf(t, "content still exists", "%+v", infos)
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
}
- retries++
- time.Sleep(500 * time.Millisecond)
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ return res, nil
}
+
+ target := registry + "/buildkit/testsbomsingle:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ FrontendAttrs: map[string]string{
+ "attest:sbom": "generator=" + scannerTarget,
+ },
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, "", targetFrontend, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
+
+ img := imgs.Find(pk)
+ require.NotNil(t, img)
+ require.Equal(t, []string{"/bin/sh", "-c", "cat /greeting"}, img.Img.Config.Cmd)
+
+ att := imgs.Find("unknown/unknown")
+ require.NotNil(t, att)
+ attest := intoto.Statement{}
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
+ require.Subset(t, attest.Predicate, map[string]interface{}{"name": "fallback"})
}
-func testInvalidExporter(t *testing.T, sb integration.Sandbox) {
+func testSBOMSupplements(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM)
requiresLinux(t)
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- def, err := llb.Image("busybox:latest").Marshal(sb.Context())
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ p := platforms.MustParse("linux/amd64")
+ pk := platforms.Format(p)
- target := "example.com/buildkit/testoci:latest"
- attrs := map[string]string{
- "name": target,
- }
- for _, exp := range []string{ExporterOCI, ExporterDocker} {
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res := gateway.NewResult()
+
+ // build image
+ st := llb.Scratch().File(
+ llb.Mkfile("/foo", 0600, []byte{}),
+ )
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ ref, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+ res.AddRef(pk, ref)
+
+ expPlatforms := &exptypes.Platforms{
+ Platforms: []exptypes.Platform{{ID: pk, Platform: p}},
+ }
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
+ // build attestations
+ doc := spdx.Document{
+ SPDXIdentifier: "DOCUMENT",
+ Files: []*spdx.File{
{
- Type: exp,
- Attrs: attrs,
+ // foo exists...
+ FileSPDXIdentifier: "SPDXRef-File-foo",
+ FileName: "/foo",
},
- },
- }, nil)
- // output file writer is required
- require.Error(t, err)
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
{
- Type: exp,
- Attrs: attrs,
- OutputDir: destDir,
+ // ...but bar doesn't
+ FileSPDXIdentifier: "SPDXRef-File-bar",
+ FileName: "/bar",
},
},
- }, nil)
- // output directory is not supported
- require.Error(t, err)
+ }
+ docBytes, err := json.Marshal(doc)
+ if err != nil {
+ return nil, err
+ }
+ st = llb.Scratch().
+ File(llb.Mkfile("/result.spdx", 0600, docBytes))
+ def, err = st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err = c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ refAttest, err := r.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ _, err = ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+
+ res.AddAttestation(pk, gateway.Attestation{
+ Kind: gatewaypb.AttestationKindInToto,
+ Ref: refAttest,
+ Path: "/result.spdx",
+ InToto: result.InTotoAttestation{
+ PredicateType: intoto.PredicateSPDX,
+ },
+ Metadata: map[string][]byte{
+ result.AttestationSBOMCore: []byte("result"),
+ },
+ })
+
+ return res, nil
}
- _, err = c.Solve(sb.Context(), def, SolveOpt{
+ // test the default fallback scanner
+ target := registry + "/buildkit/testsbom:latest"
+ _, err = c.Build(sb.Context(), SolveOpt{
+ FrontendAttrs: map[string]string{
+ "attest:sbom": "",
+ },
Exports: []ExportEntry{
{
- Type: ExporterLocal,
- Attrs: attrs,
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
},
},
- }, nil)
- // output directory is required
- require.Error(t, err)
+ }, "", frontend, nil)
+ require.NoError(t, err)
- f, err := os.Create(filepath.Join(destDir, "a"))
+ desc, provider, err := contentutil.ProviderFromRef(target)
require.NoError(t, err)
- defer f.Close()
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- Attrs: attrs,
- Output: fixedWriteCloser(f),
- },
- },
- }, nil)
- // output file writer is not supported
- require.Error(t, err)
- checkAllReleasable(t, c, sb, true)
-}
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
-// moby/buildkit#492
-func testParallelLocalBuilds(t *testing.T, sb integration.Sandbox) {
- ctx, cancel := context.WithCancel(sb.Context())
- defer cancel()
+ att := imgs.Find("unknown/unknown")
+ attest := struct {
+ intoto.StatementHeader
+ Predicate spdx.Document
+ }{}
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)
+
+ require.Equal(t, "DOCUMENT", string(attest.Predicate.SPDXIdentifier))
+ require.Len(t, attest.Predicate.Files, 2)
+ require.Equal(t, attest.Predicate.Files[0].FileName, "/foo")
+ require.Regexp(t, "^layerID: sha256:", attest.Predicate.Files[0].FileComment)
+ require.Equal(t, attest.Predicate.Files[1].FileName, "/bar")
+ require.Empty(t, attest.Predicate.Files[1].FileComment)
+}
- c, err := New(ctx, sb.Address())
+func testMultipleCacheExports(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureMultiCacheExport)
+ c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- eg, ctx := errgroup.WithContext(ctx)
-
- for i := 0; i < 3; i++ {
- func(i int) {
- eg.Go(func() error {
- fn := fmt.Sprintf("test%d", i)
- srcDir, err := tmpdir(
- fstest.CreateFile(fn, []byte("contents"), 0600),
- )
- require.NoError(t, err)
- defer os.RemoveAll(srcDir)
-
- def, err := llb.Local("source").Marshal(sb.Context())
- require.NoError(t, err)
-
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- _, err = c.Solve(ctx, def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterLocal,
- OutputDir: destDir,
- },
- },
- LocalDirs: map[string]string{
- "source": srcDir,
- },
- }, nil)
- require.NoError(t, err)
-
- act, err := ioutil.ReadFile(filepath.Join(destDir, fn))
- require.NoError(t, err)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
- require.Equal(t, "contents", string(act))
- return nil
- })
- }(i)
+ busybox := llb.Image("busybox:latest")
+ st := llb.Scratch()
+ run := func(cmd string) {
+ st = busybox.Run(llb.Shlex(cmd), llb.Dir("/wd")).AddMount("/wd", st)
}
+ run(`sh -c "echo -n foobar > const"`)
+ run(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > unique"`)
- err = eg.Wait()
+ def, err := st.Marshal(sb.Context())
require.NoError(t, err)
-}
-// testRelativeMountpoint is a test that relative paths for mountpoints don't
-// fail when runc is upgraded to at least rc95, which introduces an error when
-// mountpoints are not absolute. Relative paths should be transformed to
-// absolute points based on the llb.State's current working directory.
-func testRelativeMountpoint(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ target := path.Join(registry, "image:test")
+ target2 := path.Join(registry, "image-copy:test")
+ cacheRef := path.Join(registry, "cache:test")
+ cacheOutDir, cacheOutDir2 := t.TempDir(), t.TempDir()
+
+ res, err := c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": cacheOutDir,
+ },
+ },
+ {
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": cacheOutDir2,
+ },
+ },
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": cacheRef,
+ },
+ },
+ {
+ Type: "inline",
+ },
+ },
+ }, nil)
require.NoError(t, err)
- defer c.Close()
- id := identity.NewID()
+ ensureFile(t, filepath.Join(cacheOutDir, "index.json"))
+ ensureFile(t, filepath.Join(cacheOutDir2, "index.json"))
- st := llb.Image("busybox:latest").Dir("/root").Run(
- llb.Shlexf("sh -c 'echo -n %s > /root/relpath/data'", id),
- ).AddMount("relpath", llb.Scratch())
+ dgst := res.ExporterResponse[exptypes.ExporterImageDigestKey]
- def, err := st.Marshal(sb.Context())
+ uniqueFile, err := readFileInImage(sb.Context(), t, c, target+"@"+dgst, "/unique")
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
+ res, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterImage,
+ Attrs: map[string]string{
+ "name": target2,
+ "push": "true",
+ },
+ },
+ },
+ CacheExports: []CacheOptionsEntry{
+ {
+ Type: "inline",
+ },
+ },
+ }, nil)
require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ dgst2 := res.ExporterResponse[exptypes.ExporterImageDigestKey]
+ require.Equal(t, dgst, dgst2)
+
+ destDir := t.TempDir()
+ ensurePruneAll(t, c, sb)
_, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
@@ -5538,235 +8463,233 @@ func testRelativeMountpoint(t *testing.T, sb integration.Sandbox) {
OutputDir: destDir,
},
},
+ CacheImports: []CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": cacheRef,
+ },
+ },
+ },
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "data"))
- require.NoError(t, err)
- require.Equal(t, dt, []byte(id))
+ ensureFileContents(t, filepath.Join(destDir, "const"), "foobar")
+ ensureFileContents(t, filepath.Join(destDir, "unique"), string(uniqueFile))
}
-// moby/buildkit#2476
-func testBuildInfoExporter(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
+func testMountStubsDirectory(t *testing.T, sb integration.Sandbox) {
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
- st := llb.Image("busybox:latest").Run(
- llb.Args([]string{"/bin/sh", "-c", `echo hello`}),
- )
- def, err := st.Marshal(sb.Context())
- if err != nil {
- return nil, err
- }
- return c.Solve(ctx, gateway.SolveRequest{
- Definition: def.ToPB(),
- FrontendOpt: map[string]string{"build-arg:foo": "bar"},
- })
- }
+ st := llb.Image("busybox:latest").
+ File(llb.Mkdir("/test", 0700)).
+ File(llb.Mkdir("/test/qux/", 0700)).
+ Run(
+ llb.Args([]string{"touch", "/test/baz/keep"}),
+ // check stubs directory is removed
+ llb.AddMount("/test/foo", llb.Scratch(), llb.Tmpfs()),
+ // check that stubs directory are recursively removed
+ llb.AddMount("/test/bar/x/y", llb.Scratch(), llb.Tmpfs()),
+ // check that only empty stubs directories are removed
+ llb.AddMount("/test/baz/x", llb.Scratch(), llb.Tmpfs()),
+ // check that previously existing directory are not removed
+ llb.AddMount("/test/qux", llb.Scratch(), llb.Tmpfs()),
+ ).Root()
+ st = llb.Scratch().File(llb.Copy(st, "/test", "/", &llb.CopyInfo{CopyDirContentsOnly: true}))
+ def, err := st.Marshal(sb.Context())
+ require.NoError(t, err)
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
+ tmpDir := t.TempDir()
+ tarFile := filepath.Join(tmpDir, "out.tar")
+ tarFileW, err := os.Create(tarFile)
require.NoError(t, err)
+ defer tarFileW.Close()
- res, err := c.Build(sb.Context(), SolveOpt{
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
Exports: []ExportEntry{
{
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": registry + "/buildkit/test-buildinfo:latest",
- "push": "true",
- },
+ Type: ExporterTar,
+ Output: fixedWriteCloser(tarFileW),
},
},
- }, "", frontend, nil)
+ }, nil)
require.NoError(t, err)
+ tarFileW.Close()
- require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo)
- decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo])
+ dt, err := os.ReadFile(tarFile)
require.NoError(t, err)
- var exbi binfotypes.BuildInfo
- err = json.Unmarshal(decbi, &exbi)
+ m, err := testutil.ReadTarToMap(dt, false)
require.NoError(t, err)
- attrval := "bar"
- require.Equal(t, exbi.Attrs, map[string]*string{"build-arg:foo": &attrval})
- require.Equal(t, len(exbi.Sources), 1)
- require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage)
- require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest")
+ keys := make([]string, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+
+ require.ElementsMatch(t, []string{
+ "baz/",
+ "baz/keep",
+ "qux/",
+ }, keys)
}
-// moby/buildkit#2476
-func testBuildInfoInline(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
+// https://github.com/moby/buildkit/issues/3148
+func testMountStubsTimestamp(t *testing.T, sb integration.Sandbox) {
c, err := New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
+ const sourceDateEpoch = int64(1234567890) // Fri Feb 13 11:31:30 PM UTC 2009
st := llb.Image("busybox:latest").Run(
- llb.Args([]string{"/bin/sh", "-c", `echo hello`}),
+ llb.Args([]string{"/bin/touch", fmt.Sprintf("--date=@%d", sourceDateEpoch),
+ "/bin",
+ "/etc",
+ "/var",
+ "/var/foo",
+ "/tmp",
+ "/tmp/foo2",
+ "/tmp/foo2/bar",
+ }),
+ llb.AddMount("/var/foo", llb.Scratch(), llb.Tmpfs()),
+ llb.AddMount("/tmp/foo2/bar", llb.Scratch(), llb.Tmpfs()),
)
def, err := st.Marshal(sb.Context())
require.NoError(t, err)
- registry, err := sb.NewRegistry()
- if errors.Is(err, integration.ErrRequirements) {
- t.Skip(err.Error())
- }
+ tmpDir := t.TempDir()
+ tarFile := filepath.Join(tmpDir, "out.tar")
+ tarFileW, err := os.Create(tarFile)
require.NoError(t, err)
+ defer tarFileW.Close()
- cdAddress := sb.ContainerdAddress()
- if cdAddress == "" {
- t.Skip("rest of test requires containerd worker")
- }
-
- client, err := newContainerd(cdAddress)
+ _, err = c.Solve(sb.Context(), def, SolveOpt{
+ Exports: []ExportEntry{
+ {
+ Type: ExporterTar,
+ Output: fixedWriteCloser(tarFileW),
+ },
+ },
+ }, nil)
require.NoError(t, err)
- defer client.Close()
+ tarFileW.Close()
- ctx := namespaces.WithNamespace(sb.Context(), "buildkit")
-
- for _, tt := range []struct {
- name string
- buildAttrs bool
- }{{
- "attrsEnabled",
- true,
- }, {
- "attrsDisabled",
- false,
- }} {
- t.Run(tt.name, func(t *testing.T) {
- target := registry + "/buildkit/test-buildinfo:latest"
-
- _, err = c.Solve(sb.Context(), def, SolveOpt{
- Exports: []ExportEntry{
- {
- Type: ExporterImage,
- Attrs: map[string]string{
- "name": target,
- "push": "true",
- "buildinfo-attrs": strconv.FormatBool(tt.buildAttrs),
- },
- },
- },
- FrontendAttrs: map[string]string{
- "build-arg:foo": "bar",
- },
- }, nil)
- require.NoError(t, err)
-
- img, err := client.GetImage(ctx, target)
- require.NoError(t, err)
-
- desc, err := img.Config(ctx)
- require.NoError(t, err)
-
- dt, err := content.ReadBlob(ctx, img.ContentStore(), desc)
- require.NoError(t, err)
-
- var config binfotypes.ImageConfig
- require.NoError(t, json.Unmarshal(dt, &config))
-
- dec, err := base64.StdEncoding.DecodeString(config.BuildInfo)
- require.NoError(t, err)
-
- var bi binfotypes.BuildInfo
- require.NoError(t, json.Unmarshal(dec, &bi))
-
- if tt.buildAttrs {
- attrval := "bar"
- require.Contains(t, bi.Attrs, "build-arg:foo")
- require.Equal(t, bi.Attrs["build-arg:foo"], &attrval)
- } else {
- require.NotContains(t, bi.Attrs, "build-arg:foo")
- }
- require.Equal(t, len(bi.Sources), 1)
- require.Equal(t, bi.Sources[0].Type, binfotypes.SourceTypeDockerImage)
- require.Equal(t, bi.Sources[0].Ref, "docker.io/library/busybox:latest")
- })
- }
-}
-
-func testBuildInfoNoExport(t *testing.T, sb integration.Sandbox) {
- requiresLinux(t)
- c, err := New(sb.Context(), sb.Address())
+ tarFileR, err := os.Open(tarFile)
require.NoError(t, err)
- defer c.Close()
-
- frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
- st := llb.Image("busybox:latest").Run(
- llb.Args([]string{"/bin/sh", "-c", `echo hello`}),
- )
- def, err := st.Marshal(sb.Context())
- if err != nil {
- return nil, err
+ defer tarFileR.Close()
+ tarR := tar.NewReader(tarFileR)
+ touched := map[string]*tar.Header{
+ "bin/": nil, // Regular dir
+ "etc/": nil, // Parent of file mounts (etc/{resolv.conf, hosts})
+ "var/": nil, // Parent of dir mount (var/foo/)
+ "tmp/": nil, // Grandparent of dir mount (tmp/foo2/bar/)
+ // No support for reproducing the timestamps of mount point directories such as var/foo/ and tmp/foo2/bar/,
+ // because the touched timestamp value is lost when the mount is unmounted.
+ }
+ for {
+ hd, err := tarR.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ require.NoError(t, err)
+ if x, ok := touched[hd.Name]; ok && x == nil {
+ touched[hd.Name] = hd
}
- return c.Solve(ctx, gateway.SolveRequest{
- Definition: def.ToPB(),
- FrontendOpt: map[string]string{"build-arg:foo": "bar"},
- })
}
+ for name, hd := range touched {
+ t.Logf("Verifying %q (%+v)", name, hd)
+ require.NotNil(t, hd, name)
+ require.Equal(t, sourceDateEpoch, hd.ModTime.Unix(), name)
+ }
+}
- res, err := c.Build(sb.Context(), SolveOpt{}, "", frontend, nil)
- require.NoError(t, err)
-
- require.Contains(t, res.ExporterResponse, exptypes.ExporterBuildInfo)
- decbi, err := base64.StdEncoding.DecodeString(res.ExporterResponse[exptypes.ExporterBuildInfo])
- require.NoError(t, err)
+func ensureFile(t *testing.T, path string) {
+ st, err := os.Stat(path)
+ require.NoError(t, err, "expected file at %s", path)
+ require.True(t, st.Mode().IsRegular())
+}
- var exbi binfotypes.BuildInfo
- err = json.Unmarshal(decbi, &exbi)
+func ensureFileContents(t *testing.T, path, expectedContents string) {
+ contents, err := os.ReadFile(path)
require.NoError(t, err)
-
- attrval := "bar"
- require.Equal(t, exbi.Attrs, map[string]*string{"build-arg:foo": &attrval})
- require.Equal(t, len(exbi.Sources), 1)
- require.Equal(t, exbi.Sources[0].Type, binfotypes.SourceTypeDockerImage)
- require.Equal(t, exbi.Sources[0].Ref, "docker.io/library/busybox:latest")
+ require.Equal(t, expectedContents, string(contents))
}
-func tmpdir(appliers ...fstest.Applier) (string, error) {
- tmpdir, err := ioutil.TempDir("", "buildkit-client")
+func makeSSHAgentSock(t *testing.T, agent agent.Agent) (p string, err error) {
+ tmpDir, err := integration.Tmpdir(t)
if err != nil {
return "", err
}
- if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil {
+
+ sockPath := filepath.Join(tmpDir, "ssh_auth_sock")
+
+ l, err := net.Listen("unix", sockPath)
+ if err != nil {
return "", err
}
- return tmpdir, nil
+ t.Cleanup(func() {
+ require.NoError(t, l.Close())
+ })
+
+ s := &server{l: l}
+ go s.run(agent)
+
+ return sockPath, nil
+}
+
+type imageTimestamps struct {
+ FromImage []string // from img.Created and img.[]History.Created
+ FromAnnotation string // from index.Manifests[0].Annotations["org.opencontainers.image.created"]
}
-func makeSSHAgentSock(agent agent.Agent) (p string, cleanup func() error, err error) {
- tmpDir, err := ioutil.TempDir("", "buildkit")
+func readImageTimestamps(dt []byte) (*imageTimestamps, error) {
+ m, err := testutil.ReadTarToMap(dt, false)
if err != nil {
- return "", nil, err
+ return nil, err
}
- defer func() {
- if err != nil {
- os.RemoveAll(tmpDir)
- }
- }()
- sockPath := filepath.Join(tmpDir, "ssh_auth_sock")
+ if _, ok := m["oci-layout"]; !ok {
+ return nil, errors.Errorf("no oci-layout")
+ }
- l, err := net.Listen("unix", sockPath)
- if err != nil {
- return "", nil, err
+ var index ocispecs.Index
+ if err := json.Unmarshal(m["index.json"].Data, &index); err != nil {
+ return nil, err
+ }
+ if len(index.Manifests) != 1 {
+ return nil, errors.Errorf("invalid manifest count %d", len(index.Manifests))
}
- s := &server{l: l}
- go s.run(agent)
+ var res imageTimestamps
+ res.FromAnnotation = index.Manifests[0].Annotations[ocispecs.AnnotationCreated]
+
+ var mfst ocispecs.Manifest
+ if err := json.Unmarshal(m["blobs/sha256/"+index.Manifests[0].Digest.Hex()].Data, &mfst); err != nil {
+ return nil, err
+ }
+ // don't unmarshal to image type so we get the original string value
+ type history struct {
+ Created string `json:"created"`
+ }
+
+ img := struct {
+ History []history `json:"history"`
+ Created string `json:"created"`
+ }{}
+
+ if err := json.Unmarshal(m["blobs/sha256/"+mfst.Config.Digest.Hex()].Data, &img); err != nil {
+ return nil, err
+ }
- return sockPath, func() error {
- l.Close()
- return os.RemoveAll(tmpDir)
- }, nil
+ res.FromImage = []string{
+ img.Created,
+ }
+ for _, h := range img.History {
+ res.FromImage = append(res.FromImage, h.Created)
+ }
+ return &res, nil
}
type server struct {
@@ -5811,11 +8734,159 @@ func (*netModeDefault) UpdateConfigFile(in string) string {
return in
}
+type netModeBridgeDNS struct{}
+
+func (*netModeBridgeDNS) UpdateConfigFile(in string) string {
+ return in + `
+# configure bridge networking
+[worker.oci]
+networkMode = "cni"
+cniConfigPath = "/etc/buildkit/dns-cni.conflist"
+
+[worker.containerd]
+networkMode = "cni"
+cniConfigPath = "/etc/buildkit/dns-cni.conflist"
+
+[dns]
+nameservers = ["10.11.0.1"]
+`
+}
+
var hostNetwork integration.ConfigUpdater = &netModeHost{}
var defaultNetwork integration.ConfigUpdater = &netModeDefault{}
+var bridgeDNSNetwork integration.ConfigUpdater = &netModeBridgeDNS{}
func fixedWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) {
return func(map[string]string) (io.WriteCloser, error) {
return wc, nil
}
}
+
+func testSourcePolicy(t *testing.T, sb integration.Sandbox) {
+ requiresLinux(t)
+ c, err := New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ st := llb.Image("busybox:1.34.1-uclibc").File(
+ llb.Copy(llb.HTTP("https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md"),
+ "README.md", "README.md"))
+ def, err := st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
+ return c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ }
+
+ type testCase struct {
+ srcPol *sourcepolicypb.Policy
+ expectedErr string
+ }
+ testCases := []testCase{
+ {
+ // Valid
+ srcPol: &sourcepolicypb.Policy{
+ Rules: []*sourcepolicypb.Rule{
+ {
+ Action: sourcepolicypb.PolicyAction_CONVERT,
+ Selector: &sourcepolicypb.Selector{
+ Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc",
+ },
+ Updates: &sourcepolicypb.Update{
+ Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc@sha256:3614ca5eacf0a3a1bcc361c939202a974b4902b9334ff36eb29ffe9011aaad83",
+ },
+ },
+ {
+ Action: sourcepolicypb.PolicyAction_CONVERT,
+ Selector: &sourcepolicypb.Selector{
+ Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md",
+ },
+ Updates: &sourcepolicypb.Update{
+ Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md",
+ Attrs: map[string]string{"http.checksum": "sha256:6e4b94fc270e708e1068be28bd3551dc6917a4fc5a61293d51bb36e6b75c4b53"},
+ },
+ },
+ },
+ },
+ expectedErr: "",
+ },
+ {
+ // Invalid docker-image source
+ srcPol: &sourcepolicypb.Policy{
+ Rules: []*sourcepolicypb.Rule{
+ {
+ Action: sourcepolicypb.PolicyAction_CONVERT,
+ Selector: &sourcepolicypb.Selector{
+ Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc",
+ },
+ Updates: &sourcepolicypb.Update{
+ Identifier: "docker-image://docker.io/library/busybox:1.34.1-uclibc@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", // invalid
+ },
+ },
+ },
+ },
+ expectedErr: "docker.io/library/busybox:1.34.1-uclibc@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa: not found",
+ },
+ {
+ // Invalid http source
+ srcPol: &sourcepolicypb.Policy{
+ Rules: []*sourcepolicypb.Rule{
+ {
+ Action: sourcepolicypb.PolicyAction_CONVERT,
+ Selector: &sourcepolicypb.Selector{
+ Identifier: "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md",
+ },
+ Updates: &sourcepolicypb.Update{
+ Attrs: map[string]string{pb.AttrHTTPChecksum: "sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"}, // invalid
+ },
+ },
+ },
+ },
+ expectedErr: "digest mismatch sha256:6e4b94fc270e708e1068be28bd3551dc6917a4fc5a61293d51bb36e6b75c4b53: sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
+ },
+ }
+ for i, tc := range testCases {
+ tc := tc
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ _, err = c.Build(sb.Context(), SolveOpt{SourcePolicy: tc.srcPol}, "", frontend, nil)
+ if tc.expectedErr == "" {
+ require.NoError(t, err)
+ } else {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), tc.expectedErr)
+ }
+ })
+ }
+
+ t.Run("Frontend policies", func(t *testing.T) {
+ denied := "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md"
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ st := llb.Image("busybox:1.34.1-uclibc").File(
+ llb.Copy(llb.HTTP(denied),
+ "README.md", "README.md"))
+ def, err := st.Marshal(sb.Context())
+ if err != nil {
+ return nil, err
+ }
+ return c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ SourcePolicies: []*spb.Policy{{
+ Rules: []*spb.Rule{
+ {
+ Action: spb.PolicyAction_DENY,
+ Selector: &spb.Selector{
+ Identifier: denied,
+ },
+ },
+ },
+ }},
+ })
+ }
+
+ _, err = c.Build(sb.Context(), SolveOpt{}, "", frontend, nil)
+ require.ErrorContains(t, err, sourcepolicy.ErrSourceDenied.Error())
+ })
+}
diff --git a/client/connhelper/ssh/ssh.go b/client/connhelper/ssh/ssh.go
new file mode 100644
index 000000000000..e3666b572f2a
--- /dev/null
+++ b/client/connhelper/ssh/ssh.go
@@ -0,0 +1,78 @@
+// Package ssh provides connhelper for ssh://
+package ssh
+
+import (
+ "context"
+ "net"
+ "net/url"
+
+ "github.com/docker/cli/cli/connhelper/commandconn"
+ "github.com/moby/buildkit/client/connhelper"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ connhelper.Register("ssh", Helper)
+}
+
+// Helper returns helper for connecting through an SSH URL.
+func Helper(u *url.URL) (*connhelper.ConnectionHelper, error) {
+ sp, err := SpecFromURL(u)
+ if err != nil {
+ return nil, err
+ }
+ return &connhelper.ConnectionHelper{
+ ContextDialer: func(ctx context.Context, addr string) (net.Conn, error) {
+ args := []string{}
+ if sp.User != "" {
+ args = append(args, "-l", sp.User)
+ }
+ if sp.Port != "" {
+ args = append(args, "-p", sp.Port)
+ }
+ args = append(args, "--", sp.Host)
+ args = append(args, "buildctl")
+ if socket := sp.Socket; socket != "" {
+ args = append(args, "--addr", "unix://"+socket)
+ }
+ args = append(args, "dial-stdio")
+ // using background context because context remains active for the duration of the process, after dial has completed
+ return commandconn.New(context.Background(), "ssh", args...)
+ },
+ }, nil
+}
+
+// Spec
+type Spec struct {
+ User string
+ Host string
+ Port string
+ Socket string
+}
+
+// SpecFromURL creates Spec from URL.
+// URL is like ssh://<user>@host:<port>
+// Only <host> part is mandatory.
+func SpecFromURL(u *url.URL) (*Spec, error) {
+ sp := Spec{
+ Host: u.Hostname(),
+ Port: u.Port(),
+ Socket: u.Path,
+ }
+ if user := u.User; user != nil {
+ sp.User = user.Username()
+ if _, ok := user.Password(); ok {
+ return nil, errors.New("plain-text password is not supported")
+ }
+ }
+ if sp.Host == "" {
+ return nil, errors.Errorf("no host specified")
+ }
+ if u.RawQuery != "" {
+ return nil, errors.Errorf("extra query after the host: %q", u.RawQuery)
+ }
+ if u.Fragment != "" {
+ return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment)
+ }
+ return &sp, nil
+}
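
A minimal usage sketch, not part of the patch: it assumes `client.New` resolves registered connection helpers by URL scheme (the blank import triggers the `init()` registration above), and the host name is illustrative.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/moby/buildkit/client"
	// blank import registers the "ssh" scheme via init()
	_ "github.com/moby/buildkit/client/connhelper/ssh"
)

func main() {
	ctx := context.Background()
	// dials roughly: ssh -l me -p 22 -- build-host buildctl dial-stdio
	c, err := client.New(ctx, "ssh://me@build-host:22")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	workers, err := c.ListWorkers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("reachable workers:", len(workers))
}
```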
diff --git a/client/connhelper/ssh/ssh_test.go b/client/connhelper/ssh/ssh_test.go
new file mode 100644
index 000000000000..e4afb31cf882
--- /dev/null
+++ b/client/connhelper/ssh/ssh_test.go
@@ -0,0 +1,39 @@
+package ssh
+
+import (
+ "net/url"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestSpecFromURL(t *testing.T) {
+ cases := map[string]*Spec{
+ "ssh://foo": {
+ Host: "foo",
+ },
+ "ssh://me@foo:10022/s/o/c/k/e/t.sock": {
+ User: "me", Host: "foo", Port: "10022", Socket: "/s/o/c/k/e/t.sock",
+ },
+ "ssh://me:passw0rd@foo": nil,
+ "ssh://foo/bar": {
+ Host: "foo", Socket: "/bar",
+ },
+ "ssh://foo?bar": nil,
+ "ssh://foo#bar": nil,
+ "ssh://": nil,
+ }
+ for s, expected := range cases {
+ u, err := url.Parse(s)
+ if err != nil {
+ t.Fatal(err)
+ }
+ got, err := SpecFromURL(u)
+ if expected != nil {
+ require.NoError(t, err)
+ require.EqualValues(t, expected, got, s)
+ } else {
+ require.Error(t, err, s)
+ }
+ }
+}
diff --git a/client/diskusage.go b/client/diskusage.go
index 2a2373f9d36a..0918c7dcd40f 100644
--- a/client/diskusage.go
+++ b/client/diskusage.go
@@ -10,18 +10,18 @@ import (
)
type UsageInfo struct {
- ID string
- Mutable bool
- InUse bool
- Size int64
+ ID string `json:"id"`
+ Mutable bool `json:"mutable"`
+ InUse bool `json:"inUse"`
+ Size int64 `json:"size"`
- CreatedAt time.Time
- LastUsedAt *time.Time
- UsageCount int
- Parents []string
- Description string
- RecordType UsageRecordType
- Shared bool
+ CreatedAt time.Time `json:"createdAt"`
+ LastUsedAt *time.Time `json:"lastUsedAt"`
+ UsageCount int `json:"usageCount"`
+ Parents []string `json:"parents"`
+ Description string `json:"description"`
+ RecordType UsageRecordType `json:"recordType"`
+ Shared bool `json:"shared"`
}
func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) {
@@ -31,7 +31,7 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa
}
req := &controlapi.DiskUsageRequest{Filter: info.Filter}
- resp, err := c.controlClient().DiskUsage(ctx, req)
+ resp, err := c.ControlClient().DiskUsage(ctx, req)
if err != nil {
return nil, errors.Wrap(err, "failed to call diskusage")
}
diff --git a/client/info.go b/client/info.go
new file mode 100644
index 000000000000..d5bdbcec8968
--- /dev/null
+++ b/client/info.go
@@ -0,0 +1,40 @@
+package client
+
+import (
+ "context"
+
+ controlapi "github.com/moby/buildkit/api/services/control"
+ apitypes "github.com/moby/buildkit/api/types"
+ "github.com/pkg/errors"
+)
+
+type Info struct {
+ BuildkitVersion BuildkitVersion `json:"buildkitVersion"`
+}
+
+type BuildkitVersion struct {
+ Package string `json:"package"`
+ Version string `json:"version"`
+ Revision string `json:"revision"`
+}
+
+func (c *Client) Info(ctx context.Context) (*Info, error) {
+ res, err := c.ControlClient().Info(ctx, &controlapi.InfoRequest{})
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to call info")
+ }
+ return &Info{
+ BuildkitVersion: fromAPIBuildkitVersion(res.BuildkitVersion),
+ }, nil
+}
+
+func fromAPIBuildkitVersion(in *apitypes.BuildkitVersion) BuildkitVersion {
+ if in == nil {
+ return BuildkitVersion{}
+ }
+ return BuildkitVersion{
+ Package: in.Package,
+ Version: in.Version,
+ Revision: in.Revision,
+ }
+}
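
A short usage sketch (not part of the patch) for the new `Info` API; the socket address is the conventional buildkitd default and is an assumption here.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()
	// assumed default daemon address; adjust for your setup
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	info, err := c.Info(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("buildkitd %s %s (rev %s)\n",
		info.BuildkitVersion.Package,
		info.BuildkitVersion.Version,
		info.BuildkitVersion.Revision)
}
```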
diff --git a/client/llb/definition.go b/client/llb/definition.go
index 697c1f54c913..d6dda89bb14b 100644
--- a/client/llb/definition.go
+++ b/client/llb/definition.go
@@ -29,6 +29,10 @@ type DefinitionOp struct {
// NewDefinitionOp returns a new operation from a marshalled definition.
func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) {
+ if def == nil {
+ return nil, errors.New("invalid nil input definition to definition op")
+ }
+
ops := make(map[digest.Digest]*pb.Op)
defs := make(map[digest.Digest][]byte)
platforms := make(map[digest.Digest]*ocispecs.Platform)
diff --git a/client/llb/definition_test.go b/client/llb/definition_test.go
index 8d7a4009aff9..4ef93e0ec357 100644
--- a/client/llb/definition_test.go
+++ b/client/llb/definition_test.go
@@ -118,3 +118,9 @@ func TestDefinitionInputCache(t *testing.T) {
// 1 exec + 2x2 mounts from stA and stB + 1 src = 6 vertexes
require.Equal(t, 6, len(vertexCache))
}
+
+func TestDefinitionNil(t *testing.T) {
+ // should be an error, not a panic
+ _, err := NewDefinitionOp(nil)
+ require.Error(t, err)
+}
diff --git a/client/llb/exec.go b/client/llb/exec.go
index 994804a13992..2b1d9bd3f1ee 100644
--- a/client/llb/exec.go
+++ b/client/llb/exec.go
@@ -192,12 +192,13 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
}
meta := &pb.Meta{
- Args: args,
- Env: env.ToArray(),
- Cwd: cwd,
- User: user,
- Hostname: hostname,
- CgroupParent: cgrpParent,
+ Args: args,
+ Env: env.ToArray(),
+ Cwd: cwd,
+ User: user,
+ Hostname: hostname,
+ CgroupParent: cgrpParent,
+ RemoveMountStubsRecursive: true,
}
extraHosts, err := getExtraHosts(e.base)(ctx, c)
diff --git a/client/llb/llbtest/platform_test.go b/client/llb/llbtest/platform_test.go
index 40c676927b88..f66da1b989ec 100644
--- a/client/llb/llbtest/platform_test.go
+++ b/client/llb/llbtest/platform_test.go
@@ -27,7 +27,7 @@ func TestCustomPlatform(t *testing.T) {
def, err := s.Marshal(context.TODO())
require.NoError(t, err)
- e, err := llbsolver.Load(def.ToPB())
+ e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil)
require.NoError(t, err)
require.Equal(t, depth(e), 5)
@@ -56,7 +56,7 @@ func TestDefaultPlatform(t *testing.T) {
def, err := s.Marshal(context.TODO())
require.NoError(t, err)
- e, err := llbsolver.Load(def.ToPB())
+ e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil)
require.NoError(t, err)
require.Equal(t, depth(e), 2)
@@ -80,7 +80,7 @@ func TestPlatformOnMarshal(t *testing.T) {
def, err := s.Marshal(context.TODO(), llb.Windows)
require.NoError(t, err)
- e, err := llbsolver.Load(def.ToPB())
+ e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil)
require.NoError(t, err)
expected := ocispecs.Platform{OS: "windows", Architecture: "amd64"}
@@ -100,7 +100,7 @@ func TestPlatformMixed(t *testing.T) {
def, err := s1.Marshal(context.TODO(), llb.LinuxAmd64)
require.NoError(t, err)
- e, err := llbsolver.Load(def.ToPB())
+ e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil)
require.NoError(t, err)
require.Equal(t, depth(e), 4)
@@ -129,7 +129,7 @@ func TestFallbackPath(t *testing.T) {
// the cap.
def, err := llb.Scratch().Run(llb.Shlex("cmd")).Marshal(context.TODO(), llb.LinuxAmd64)
require.NoError(t, err)
- e, err := llbsolver.Load(def.ToPB())
+ e, err := llbsolver.Load(context.TODO(), def.ToPB(), nil)
require.NoError(t, err)
require.False(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath])
_, ok := getenv(e, "PATH")
@@ -141,7 +141,7 @@ func TestFallbackPath(t *testing.T) {
require.Error(t, cs.Supports(pb.CapExecMetaSetsDefaultPath))
def, err = llb.Scratch().Run(llb.Shlex("cmd")).Marshal(context.TODO(), llb.LinuxAmd64, llb.WithCaps(cs))
require.NoError(t, err)
- e, err = llbsolver.Load(def.ToPB())
+ e, err = llbsolver.Load(context.TODO(), def.ToPB(), nil)
require.NoError(t, err)
require.False(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath])
v, ok := getenv(e, "PATH")
@@ -155,7 +155,7 @@ func TestFallbackPath(t *testing.T) {
require.NoError(t, cs.Supports(pb.CapExecMetaSetsDefaultPath))
def, err = llb.Scratch().Run(llb.Shlex("cmd")).Marshal(context.TODO(), llb.LinuxAmd64, llb.WithCaps(cs))
require.NoError(t, err)
- e, err = llbsolver.Load(def.ToPB())
+ e, err = llbsolver.Load(context.TODO(), def.ToPB(), nil)
require.NoError(t, err)
require.True(t, def.Metadata[e.Vertex.Digest()].Caps[pb.CapExecMetaSetsDefaultPath])
_, ok = getenv(e, "PATH")
@@ -171,7 +171,7 @@ func TestFallbackPath(t *testing.T) {
} {
def, err = llb.Scratch().AddEnv("PATH", "foo").Run(llb.Shlex("cmd")).Marshal(context.TODO(), append(cos, llb.LinuxAmd64)...)
require.NoError(t, err)
- e, err = llbsolver.Load(def.ToPB())
+ e, err = llbsolver.Load(context.TODO(), def.ToPB(), nil)
require.NoError(t, err)
// pb.CapExecMetaSetsDefaultPath setting is irrelevant (and variable).
v, ok = getenv(e, "PATH")
diff --git a/client/llb/marshal.go b/client/llb/marshal.go
index e59e560ee95c..3b02299e431d 100644
--- a/client/llb/marshal.go
+++ b/client/llb/marshal.go
@@ -2,7 +2,6 @@ package llb
import (
"io"
- "io/ioutil"
"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/solver/pb"
@@ -67,7 +66,7 @@ func WriteTo(def *Definition, w io.Writer) error {
}
func ReadFrom(r io.Reader) (*Definition, error) {
- b, err := ioutil.ReadAll(r)
+ b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@@ -88,10 +87,7 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
c.Platform = p
}
- for _, wc := range override.WorkerConstraints {
- c.WorkerConstraints = append(c.WorkerConstraints, wc)
- }
-
+ c.WorkerConstraints = append(c.WorkerConstraints, override.WorkerConstraints...)
c.Metadata = mergeMetadata(c.Metadata, override.Metadata)
if c.Platform == nil {
diff --git a/client/llb/resolver.go b/client/llb/resolver.go
index af1edc10715e..b3b9cdf751c7 100644
--- a/client/llb/resolver.go
+++ b/client/llb/resolver.go
@@ -23,13 +23,35 @@ func ResolveDigest(v bool) ImageOption {
})
}
+func WithLayerLimit(l int) ImageOption {
+ return imageOptionFunc(func(ii *ImageInfo) {
+ ii.layerLimit = &l
+ })
+}
+
// ImageMetaResolver can resolve image config metadata from a reference
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error)
}
+type ResolverType int
+
+const (
+ ResolverTypeRegistry ResolverType = iota
+ ResolverTypeOCILayout
+)
+
type ResolveImageConfigOpt struct {
+ ResolverType
+
Platform *ocispecs.Platform
ResolveMode string
LogName string
+
+ Store ResolveImageConfigOptStore
+}
+
+type ResolveImageConfigOptStore struct {
+ SessionID string
+ StoreID string
}
diff --git a/client/llb/source.go b/client/llb/source.go
index c1be90b70405..27c8c1b617f2 100644
--- a/client/llb/source.go
+++ b/client/llb/source.go
@@ -116,6 +116,11 @@ func Image(ref string, opts ...ImageOption) State {
attrs[pb.AttrImageRecordType] = info.RecordType
}
+ if ll := info.layerLimit; ll != nil {
+ attrs[pb.AttrImageLayerLimit] = strconv.FormatInt(int64(*ll), 10)
+ addCap(&info.Constraints, pb.CapSourceImageLayerLimit)
+ }
+
src := NewSource("docker-image://"+ref, attrs, info.Constraints) // controversial
if err != nil {
src.err = err
@@ -127,8 +132,9 @@ func Image(ref string, opts ...ImageOption) State {
p = c.Platform
}
_, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{
- Platform: p,
- ResolveMode: info.resolveMode.String(),
+ Platform: p,
+ ResolveMode: info.resolveMode.String(),
+ ResolverType: ResolverTypeRegistry,
})
if err != nil {
return State{}, err
@@ -142,8 +148,9 @@ func Image(ref string, opts ...ImageOption) State {
p = c.Platform
}
dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{
- Platform: p,
- ResolveMode: info.resolveMode.String(),
+ Platform: p,
+ ResolveMode: info.resolveMode.String(),
+ ResolverType: ResolverTypeRegistry,
})
if err != nil {
return State{}, err
@@ -204,6 +211,7 @@ type ImageInfo struct {
metaResolver ImageMetaResolver
resolveDigest bool
resolveMode ResolveMode
+ layerLimit *int
RecordType string
}
@@ -446,6 +454,59 @@ func Differ(t DiffType, required bool) LocalOption {
})
}
+func OCILayout(ref string, opts ...OCILayoutOption) State {
+ gi := &OCILayoutInfo{}
+
+ for _, o := range opts {
+ o.SetOCILayoutOption(gi)
+ }
+ attrs := map[string]string{}
+ if gi.sessionID != "" {
+ attrs[pb.AttrOCILayoutSessionID] = gi.sessionID
+ }
+ if gi.storeID != "" {
+ attrs[pb.AttrOCILayoutStoreID] = gi.storeID
+ }
+ if gi.layerLimit != nil {
+ attrs[pb.AttrOCILayoutLayerLimit] = strconv.FormatInt(int64(*gi.layerLimit), 10)
+ }
+
+ addCap(&gi.Constraints, pb.CapSourceOCILayout)
+
+ source := NewSource("oci-layout://"+ref, attrs, gi.Constraints)
+ return NewState(source.Output())
+}
+
+type OCILayoutOption interface {
+ SetOCILayoutOption(*OCILayoutInfo)
+}
+
+type ociLayoutOptionFunc func(*OCILayoutInfo)
+
+func (fn ociLayoutOptionFunc) SetOCILayoutOption(li *OCILayoutInfo) {
+ fn(li)
+}
+
+func OCIStore(sessionID string, storeID string) OCILayoutOption {
+ return ociLayoutOptionFunc(func(oi *OCILayoutInfo) {
+ oi.sessionID = sessionID
+ oi.storeID = storeID
+ })
+}
+
+func OCILayerLimit(limit int) OCILayoutOption {
+ return ociLayoutOptionFunc(func(oi *OCILayoutInfo) {
+ oi.layerLimit = &limit
+ })
+}
+
+type OCILayoutInfo struct {
+ constraintsWrapper
+ sessionID string
+ storeID string
+ layerLimit *int
+}
+
type DiffType string
const (
@@ -549,7 +610,7 @@ func Chown(uid, gid int) HTTPOption {
}
func platformSpecificSource(id string) bool {
- return strings.HasPrefix(id, "docker-image://")
+ return strings.HasPrefix(id, "docker-image://") || strings.HasPrefix(id, "oci-layout://")
}
func addCap(c *Constraints, id apicaps.CapID) {
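
A hedged sketch of the new OCI layout source added above. The `name@digest` ref format and the empty session ID are assumptions; `"mystore"` must match a key supplied through `SolveOpt.OCIStores` (or buildctl's `--oci-layout`), and the digest is a placeholder:

```go
package main

import "github.com/moby/buildkit/client/llb"

// ociBase builds a state from a client-provided OCI layout store.
func ociBase() llb.State {
	return llb.OCILayout(
		"app@sha256:0000000000000000000000000000000000000000000000000000000000000000",
		llb.OCIStore("", "mystore"), // empty session ID: assume the solve's own session is used
		llb.OCILayerLimit(1),        // expose only the first layer of the layout
	)
}
```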
diff --git a/client/llb/sourcemap.go b/client/llb/sourcemap.go
index 149355d92e5a..17cc1de6f538 100644
--- a/client/llb/sourcemap.go
+++ b/client/llb/sourcemap.go
@@ -61,7 +61,7 @@ func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) {
}
smc.index[l.SourceMap] = idx
}
- smc.locations[dgst] = ls
+ smc.locations[dgst] = append(smc.locations[dgst], ls...)
}
func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) {
diff --git a/client/llb/state.go b/client/llb/state.go
index 0295f635ccfa..7d35f3be5968 100644
--- a/client/llb/state.go
+++ b/client/llb/state.go
@@ -199,10 +199,10 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect
if opMeta != nil {
def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta)
}
+ s.Add(dgst, sls)
if _, ok := cache[dgst]; ok {
return def, nil
}
- s.Add(dgst, sls)
def.Def = append(def.Def, dt)
cache[dgst] = struct{}{}
return def, nil
@@ -230,13 +230,7 @@ func (s State) WithOutput(o Output) State {
}
func (s State) WithImageConfig(c []byte) (State, error) {
- var img struct {
- Config struct {
- Env []string `json:"Env,omitempty"`
- WorkingDir string `json:"WorkingDir,omitempty"`
- User string `json:"User,omitempty"`
- } `json:"config,omitempty"`
- }
+ var img ocispecs.Image
if err := json.Unmarshal(c, &img); err != nil {
return State{}, err
}
@@ -251,6 +245,13 @@ func (s State) WithImageConfig(c []byte) (State, error) {
}
}
s = s.Dir(img.Config.WorkingDir)
+ if img.Architecture != "" && img.OS != "" {
+ s = s.Platform(ocispecs.Platform{
+ OS: img.OS,
+ Architecture: img.Architecture,
+ Variant: img.Variant,
+ })
+ }
return s, nil
}
@@ -454,6 +455,7 @@ type ConstraintsOpt interface {
HTTPOption
ImageOption
GitOption
+ OCILayoutOption
}
type constraintsOptFunc func(m *Constraints)
@@ -470,6 +472,10 @@ func (fn constraintsOptFunc) SetLocalOption(li *LocalInfo) {
li.applyConstraints(fn)
}
+func (fn constraintsOptFunc) SetOCILayoutOption(oi *OCILayoutInfo) {
+ oi.applyConstraints(fn)
+}
+
func (fn constraintsOptFunc) SetHTTPOption(hi *HTTPInfo) {
hi.applyConstraints(fn)
}
@@ -611,6 +617,7 @@ var (
LinuxArmel = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})
LinuxArm64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"})
LinuxS390x = Platform(ocispecs.Platform{OS: "linux", Architecture: "s390x"})
+ LinuxPpc64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64"})
LinuxPpc64le = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64le"})
Darwin = Platform(ocispecs.Platform{OS: "darwin", Architecture: "amd64"})
Windows = Platform(ocispecs.Platform{OS: "windows", Architecture: "amd64"})
@@ -618,9 +625,7 @@ var (
func Require(filters ...string) ConstraintsOpt {
return constraintsOptFunc(func(c *Constraints) {
- for _, f := range filters {
- c.WorkerConstraints = append(c.WorkerConstraints, f)
- }
+ c.WorkerConstraints = append(c.WorkerConstraints, filters...)
})
}
diff --git a/client/llb/state_test.go b/client/llb/state_test.go
index a29af4dc5fbb..35a901a1d3fd 100644
--- a/client/llb/state_test.go
+++ b/client/llb/state_test.go
@@ -99,6 +99,44 @@ func TestStateSourceMapMarshal(t *testing.T) {
require.Equal(t, int32(0), def.Source.Locations[dgst.String()].Locations[2].SourceIndex)
require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[2].Ranges))
require.Equal(t, int32(9), def.Source.Locations[dgst.String()].Locations[2].Ranges[0].Start.Line)
+
+ s = Merge([]State{s, Image("myimage",
+ sm1.Location([]*pb.Range{{Start: pb.Position{Line: 10}}}),
+ )})
+ def, err = s.Marshal(context.TODO())
+ require.NoError(t, err)
+ require.Equal(t, 3, len(def.Def))
+ dgst = digest.FromBytes(def.Def[0])
+
+ require.Equal(t, 2, len(def.Source.Infos))
+ require.Equal(t, 2, len(def.Source.Locations))
+
+ require.Equal(t, "foo", def.Source.Infos[0].Filename)
+ require.Equal(t, []byte("data1"), def.Source.Infos[0].Data)
+ require.Nil(t, def.Source.Infos[0].Definition)
+
+ require.Equal(t, "bar", def.Source.Infos[1].Filename)
+ require.Equal(t, []byte("data2"), def.Source.Infos[1].Data)
+ require.Nil(t, def.Source.Infos[1].Definition)
+
+ require.NotNil(t, def.Source.Locations[dgst.String()])
+ require.Equal(t, 4, len(def.Source.Locations[dgst.String()].Locations))
+
+ require.Equal(t, int32(0), def.Source.Locations[dgst.String()].Locations[0].SourceIndex)
+ require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[0].Ranges))
+ require.Equal(t, int32(7), def.Source.Locations[dgst.String()].Locations[0].Ranges[0].Start.Line)
+
+ require.Equal(t, int32(1), def.Source.Locations[dgst.String()].Locations[1].SourceIndex)
+ require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[1].Ranges))
+ require.Equal(t, int32(8), def.Source.Locations[dgst.String()].Locations[1].Ranges[0].Start.Line)
+
+ require.Equal(t, int32(0), def.Source.Locations[dgst.String()].Locations[2].SourceIndex)
+ require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[2].Ranges))
+ require.Equal(t, int32(9), def.Source.Locations[dgst.String()].Locations[2].Ranges[0].Start.Line)
+
+ require.Equal(t, int32(0), def.Source.Locations[dgst.String()].Locations[3].SourceIndex)
+ require.Equal(t, 1, len(def.Source.Locations[dgst.String()].Locations[3].Ranges))
+ require.Equal(t, int32(10), def.Source.Locations[dgst.String()].Locations[3].Ranges[0].Start.Line)
}
func TestPlatformFromImage(t *testing.T) {
diff --git a/client/mergediff_test.go b/client/mergediff_test.go
index da1d12d42f55..61fdc9b5062c 100644
--- a/client/mergediff_test.go
+++ b/client/mergediff_test.go
@@ -3,7 +3,6 @@ package client
import (
"context"
"fmt"
- "os"
"strings"
"testing"
@@ -1192,6 +1191,13 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) {
t.Skip("rootless")
}
+ switch tc.name {
+ case "TestDiffUpperScratch":
+ if integration.IsTestDockerdMoby(sb) {
+ t.Skip("failed to handle changes: lstat ... no such file or directory: https://github.com/moby/buildkit/pull/2726#issuecomment-1070978499")
+ }
+ }
+
requiresLinux(t)
cdAddress := sb.ContainerdAddress()
@@ -1218,7 +1224,7 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) {
var exportInlineCacheOpts []CacheOptionsEntry
var importRegistryCacheOpts []CacheOptionsEntry
var exportRegistryCacheOpts []CacheOptionsEntry
- if os.Getenv("TEST_DOCKERD") != "1" {
+ if !integration.IsTestDockerd() {
importInlineCacheOpts = []CacheOptionsEntry{{
Type: "registry",
Attrs: map[string]string{
@@ -1245,7 +1251,7 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) {
resetState(t, c, sb)
requireContents(ctx, t, c, sb, tc.state, nil, exportInlineCacheOpts, imageTarget, tc.contents(sb))
- if os.Getenv("TEST_DOCKERD") == "1" {
+ if integration.IsTestDockerd() {
return
}
@@ -1266,8 +1272,9 @@ func (tc verifyContents) Run(t *testing.T, sb integration.Sandbox) {
{
Type: ExporterImage,
Attrs: map[string]string{
- "name": imageTarget,
- "push": "true",
+ "name": imageTarget,
+ "push": "true",
+ "unsafe-internal-store-allow-incomplete": "true",
},
},
},
@@ -1310,7 +1317,6 @@ func (tc verifyBlobReuse) Name() string {
}
func (tc verifyBlobReuse) Run(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
requiresLinux(t)
cdAddress := sb.ContainerdAddress()
diff --git a/client/ociindex/ociindex.go b/client/ociindex/ociindex.go
index a9c100a95bcd..3731ff36bb23 100644
--- a/client/ociindex/ociindex.go
+++ b/client/ociindex/ociindex.go
@@ -2,8 +2,9 @@ package ociindex
import (
"encoding/json"
- "io/ioutil"
+ "io"
"os"
+ "path"
"github.com/gofrs/flock"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -11,69 +12,86 @@ import (
)
const (
- // IndexJSONLockFileSuffix is the suffix of the lock file
- IndexJSONLockFileSuffix = ".lock"
+ // indexFile is the name of the index file
+ indexFile = "index.json"
+
+ // lockFileSuffix is the suffix of the lock file
+ lockFileSuffix = ".lock"
)
-// PutDescToIndex puts desc to index with tag.
-// Existing manifests with the same tag will be removed from the index.
-func PutDescToIndex(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error {
- if index == nil {
- index = &ocispecs.Index{}
+type StoreIndex struct {
+ indexPath string
+ lockPath string
+}
+
+func NewStoreIndex(storePath string) StoreIndex {
+ indexPath := path.Join(storePath, indexFile)
+ return StoreIndex{
+ indexPath: indexPath,
+ lockPath: indexPath + lockFileSuffix,
}
- if index.SchemaVersion == 0 {
- index.SchemaVersion = 2
+}
+
+func (s StoreIndex) Read() (*ocispecs.Index, error) {
+ lock := flock.New(s.lockPath)
+ locked, err := lock.TryRLock()
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not lock %s", s.lockPath)
}
- if tag != "" {
- if desc.Annotations == nil {
- desc.Annotations = make(map[string]string)
- }
- desc.Annotations[ocispecs.AnnotationRefName] = tag
- // remove existing manifests with the same tag
- var manifests []ocispecs.Descriptor
- for _, m := range index.Manifests {
- if m.Annotations[ocispecs.AnnotationRefName] != tag {
- manifests = append(manifests, m)
- }
- }
- index.Manifests = manifests
+ if !locked {
+ return nil, errors.Errorf("could not lock %s", s.lockPath)
}
- index.Manifests = append(index.Manifests, desc)
- return nil
+ defer func() {
+ lock.Unlock()
+ os.RemoveAll(s.lockPath)
+ }()
+
+ b, err := os.ReadFile(s.indexPath)
+ if err != nil {
+ return nil, errors.Wrapf(err, "could not read %s", s.indexPath)
+ }
+ var idx ocispecs.Index
+ if err := json.Unmarshal(b, &idx); err != nil {
+ return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b))
+ }
+ return &idx, nil
}
-func PutDescToIndexJSONFileLocked(indexJSONPath string, desc ocispecs.Descriptor, tag string) error {
- lockPath := indexJSONPath + IndexJSONLockFileSuffix
- lock := flock.New(lockPath)
+func (s StoreIndex) Put(tag string, desc ocispecs.Descriptor) error {
+ lock := flock.New(s.lockPath)
locked, err := lock.TryLock()
if err != nil {
- return errors.Wrapf(err, "could not lock %s", lockPath)
+ return errors.Wrapf(err, "could not lock %s", s.lockPath)
}
if !locked {
- return errors.Errorf("could not lock %s", lockPath)
+ return errors.Errorf("could not lock %s", s.lockPath)
}
defer func() {
lock.Unlock()
- os.RemoveAll(lockPath)
+ os.RemoveAll(s.lockPath)
}()
- f, err := os.OpenFile(indexJSONPath, os.O_RDWR|os.O_CREATE, 0644)
+
+ f, err := os.OpenFile(s.indexPath, os.O_RDWR|os.O_CREATE, 0644)
if err != nil {
- return errors.Wrapf(err, "could not open %s", indexJSONPath)
+ return errors.Wrapf(err, "could not open %s", s.indexPath)
}
defer f.Close()
+
var idx ocispecs.Index
- b, err := ioutil.ReadAll(f)
+ b, err := io.ReadAll(f)
if err != nil {
- return errors.Wrapf(err, "could not read %s", indexJSONPath)
+ return errors.Wrapf(err, "could not read %s", s.indexPath)
}
if len(b) > 0 {
if err := json.Unmarshal(b, &idx); err != nil {
- return errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b))
+ return errors.Wrapf(err, "could not unmarshal %s (%q)", s.indexPath, string(b))
}
}
- if err = PutDescToIndex(&idx, desc, tag); err != nil {
+
+ if err = insertDesc(&idx, desc, tag); err != nil {
return err
}
+
b, err = json.Marshal(idx)
if err != nil {
return err
@@ -87,27 +105,56 @@ func PutDescToIndexJSONFileLocked(indexJSONPath string, desc ocispecs.Descriptor
return nil
}
-func ReadIndexJSONFileLocked(indexJSONPath string) (*ocispecs.Index, error) {
- lockPath := indexJSONPath + IndexJSONLockFileSuffix
- lock := flock.New(lockPath)
- locked, err := lock.TryRLock()
+func (s StoreIndex) Get(tag string) (*ocispecs.Descriptor, error) {
+ idx, err := s.Read()
if err != nil {
- return nil, errors.Wrapf(err, "could not lock %s", lockPath)
+ return nil, err
}
- if !locked {
- return nil, errors.Errorf("could not lock %s", lockPath)
+
+ for _, m := range idx.Manifests {
+ if t, ok := m.Annotations[ocispecs.AnnotationRefName]; ok && t == tag {
+ return &m, nil
+ }
}
- defer func() {
- lock.Unlock()
- os.RemoveAll(lockPath)
- }()
- b, err := ioutil.ReadFile(indexJSONPath)
+ return nil, nil
+}
+
+func (s StoreIndex) GetSingle() (*ocispecs.Descriptor, error) {
+ idx, err := s.Read()
if err != nil {
- return nil, errors.Wrapf(err, "could not read %s", indexJSONPath)
+ return nil, err
}
- var idx ocispecs.Index
- if err := json.Unmarshal(b, &idx); err != nil {
- return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b))
+
+ if len(idx.Manifests) == 1 {
+ return &idx.Manifests[0], nil
}
- return &idx, nil
+ return nil, nil
+}
+
+// insertDesc puts desc to index with tag.
+// Existing manifests with the same tag will be removed from the index.
+func insertDesc(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error {
+ if index == nil {
+ return nil
+ }
+
+ if index.SchemaVersion == 0 {
+ index.SchemaVersion = 2
+ }
+ if tag != "" {
+ if desc.Annotations == nil {
+ desc.Annotations = make(map[string]string)
+ }
+ desc.Annotations[ocispecs.AnnotationRefName] = tag
+ // remove existing manifests with the same tag
+ var manifests []ocispecs.Descriptor
+ for _, m := range index.Manifests {
+ if m.Annotations[ocispecs.AnnotationRefName] != tag {
+ manifests = append(manifests, m)
+ }
+ }
+ index.Manifests = manifests
+ }
+ index.Manifests = append(index.Manifests, desc)
+ return nil
}
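
A minimal sketch of the reworked index helpers, assuming `./cache` is an OCI layout directory whose `index.json` was written by an earlier `--export-cache type=local,dest=./cache` run:

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/client/ociindex"
)

func main() {
	idx := ociindex.NewStoreIndex("./cache")

	// Get takes a read lock on index.json and returns the descriptor tagged "latest", if any.
	desc, err := idx.Get("latest")
	if err != nil {
		panic(err)
	}
	if desc == nil {
		fmt.Println("no manifest tagged latest")
		return
	}
	fmt.Println(desc.Digest, desc.MediaType)
}
```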
diff --git a/client/prune.go b/client/prune.go
index ed4815cb5aac..af8491385558 100644
--- a/client/prune.go
+++ b/client/prune.go
@@ -23,7 +23,7 @@ func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOpti
if info.All {
req.All = true
}
- cl, err := c.controlClient().Prune(ctx, req)
+ cl, err := c.ControlClient().Prune(ctx, req)
if err != nil {
return errors.Wrap(err, "failed to call prune")
}
diff --git a/client/solve.go b/client/solve.go
index f14d9c410d79..65183d61cd88 100644
--- a/client/solve.go
+++ b/client/solve.go
@@ -2,6 +2,7 @@ package client
import (
"context"
+ "encoding/base64"
"encoding/json"
"io"
"os"
@@ -14,16 +15,19 @@ import (
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/ociindex"
+ "github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
sessioncontent "github.com/moby/buildkit/session/content"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/solver/pb"
+ spb "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/entitlements"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
+ "github.com/tonistiigi/fsutil"
fstypes "github.com/tonistiigi/fsutil/types"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
@@ -32,6 +36,7 @@ import (
type SolveOpt struct {
Exports []ExportEntry
LocalDirs map[string]string
+ OCIStores map[string]content.Store
SharedKey string
Frontend string
FrontendAttrs map[string]string
@@ -42,6 +47,9 @@ type SolveOpt struct {
AllowedEntitlements []entitlements.Entitlement
SharedSession *session.Session // TODO: refactor to better session syncing
SessionPreInitialized bool // TODO: refactor to better session syncing
+ Internal bool
+ SourcePolicy *spb.Policy
+ Ref string
}
type ExportEntry struct {
@@ -88,6 +96,9 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
ref := identity.NewID()
+ if opt.Ref != "" {
+ ref = opt.Ref
+ }
eg, ctx := errgroup.WithContext(ctx)
statusContext, cancelStatus := context.WithCancel(context.Background())
@@ -122,6 +133,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
ex = opt.Exports[0]
}
+ storesToUpdate := []string{}
+
if !opt.SessionPreInitialized {
if len(syncedDirs) > 0 {
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
@@ -131,50 +144,85 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
s.Allow(a)
}
+ contentStores := map[string]content.Store{}
+ for key, store := range cacheOpt.contentStores {
+ contentStores[key] = store
+ }
+ for key, store := range opt.OCIStores {
+ key2 := "oci:" + key
+ if _, ok := contentStores[key2]; ok {
+ return nil, errors.Errorf("oci store key %q already exists", key)
+ }
+ contentStores[key2] = store
+ }
+
+ var supportFile bool
+ var supportDir bool
switch ex.Type {
case ExporterLocal:
- if ex.Output != nil {
- return nil, errors.New("output file writer is not supported by local exporter")
- }
- if ex.OutputDir == "" {
- return nil, errors.New("output directory is required for local exporter")
- }
- s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
- case ExporterOCI, ExporterDocker, ExporterTar:
- if ex.OutputDir != "" {
- return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
- }
+ supportDir = true
+ case ExporterTar:
+ supportFile = true
+ case ExporterOCI, ExporterDocker:
+ supportDir = ex.OutputDir != ""
+ supportFile = ex.Output != nil
+ }
+
+ if supportFile && supportDir {
+ return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type)
+ }
+ if !supportFile && ex.Output != nil {
+ return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
+ }
+ if !supportDir && ex.OutputDir != "" {
+ return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
+ }
+
+ if supportFile {
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
}
s.Allow(filesync.NewFSSyncTarget(ex.Output))
- default:
- if ex.Output != nil {
- return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
+ }
+ if supportDir {
+ if ex.OutputDir == "" {
+ return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
}
- if ex.OutputDir != "" {
- return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
+ switch ex.Type {
+ case ExporterOCI, ExporterDocker:
+ if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
+ return nil, err
+ }
+ cs, err := contentlocal.NewStore(ex.OutputDir)
+ if err != nil {
+ return nil, err
+ }
+ contentStores["export"] = cs
+ storesToUpdate = append(storesToUpdate, ex.OutputDir)
+ default:
+ s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
}
}
- if len(cacheOpt.contentStores) > 0 {
- s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores))
+ if len(contentStores) > 0 {
+ s.Allow(sessioncontent.NewAttachable(contentStores))
}
eg.Go(func() error {
sd := c.sessionDialer
if sd == nil {
- sd = grpchijack.Dialer(c.controlClient())
+ sd = grpchijack.Dialer(c.ControlClient())
}
return s.Run(statusContext, sd)
})
}
+ frontendAttrs := map[string]string{}
+ for k, v := range opt.FrontendAttrs {
+ frontendAttrs[k] = v
+ }
for k, v := range cacheOpt.frontendAttrs {
- if opt.FrontendAttrs == nil {
- opt.FrontendAttrs = map[string]string{}
- }
- opt.FrontendAttrs[k] = v
+ frontendAttrs[k] = v
}
solveCtx, cancelSolve := context.WithCancel(ctx)
@@ -188,8 +236,10 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
<-time.After(3 * time.Second)
cancelStatus()
}()
- bklog.G(ctx).Debugf("stopping session")
- s.Close()
+ if !opt.SessionPreInitialized {
+ bklog.G(ctx).Debugf("stopping session")
+ s.Close()
+ }
}()
var pbd *pb.Definition
if def != nil {
@@ -205,17 +255,19 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
frontendInputs[key] = def.ToPB()
}
- resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{
+ resp, err := c.ControlClient().Solve(ctx, &controlapi.SolveRequest{
Ref: ref,
Definition: pbd,
Exporter: ex.Type,
ExporterAttrs: ex.Attrs,
Session: s.ID(),
Frontend: opt.Frontend,
- FrontendAttrs: opt.FrontendAttrs,
+ FrontendAttrs: frontendAttrs,
FrontendInputs: frontendInputs,
Cache: cacheOpt.options,
Entitlements: opt.AllowedEntitlements,
+ Internal: opt.Internal,
+ SourcePolicy: opt.SourcePolicy,
})
if err != nil {
return errors.Wrap(err, "failed to solve")
@@ -228,7 +280,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
if runGateway != nil {
eg.Go(func() error {
- err := runGateway(ref, s, opt.FrontendAttrs)
+ err := runGateway(ref, s, frontendAttrs)
if err == nil {
return nil
}
@@ -249,7 +301,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
eg.Go(func() error {
- stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{
+ stream, err := c.ControlClient().Status(statusContext, &controlapi.StatusRequest{
Ref: ref,
})
if err != nil {
@@ -263,52 +315,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
return errors.Wrap(err, "failed to receive status")
}
- s := SolveStatus{}
- for _, v := range resp.Vertexes {
- s.Vertexes = append(s.Vertexes, &Vertex{
- Digest: v.Digest,
- Inputs: v.Inputs,
- Name: v.Name,
- Started: v.Started,
- Completed: v.Completed,
- Error: v.Error,
- Cached: v.Cached,
- ProgressGroup: v.ProgressGroup,
- })
- }
- for _, v := range resp.Statuses {
- s.Statuses = append(s.Statuses, &VertexStatus{
- ID: v.ID,
- Vertex: v.Vertex,
- Name: v.Name,
- Total: v.Total,
- Current: v.Current,
- Timestamp: v.Timestamp,
- Started: v.Started,
- Completed: v.Completed,
- })
- }
- for _, v := range resp.Logs {
- s.Logs = append(s.Logs, &VertexLog{
- Vertex: v.Vertex,
- Stream: int(v.Stream),
- Data: v.Msg,
- Timestamp: v.Timestamp,
- })
- }
- for _, v := range resp.Warnings {
- s.Warnings = append(s.Warnings, &VertexWarning{
- Vertex: v.Vertex,
- Level: int(v.Level),
- Short: v.Short,
- Detail: v.Detail,
- URL: v.Url,
- SourceInfo: v.Info,
- Range: v.Ranges,
- })
- }
if statusChan != nil {
- statusChan <- &s
+ statusChan <- NewSolveStatus(resp)
}
}
})
@@ -323,8 +331,29 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil {
return nil, err
}
- for indexJSONPath, tag := range cacheOpt.indicesToUpdate {
- if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil {
+ for storePath, tag := range cacheOpt.storesToUpdate {
+ idx := ociindex.NewStoreIndex(storePath)
+ if err := idx.Put(tag, manifestDesc); err != nil {
+ return nil, err
+ }
+ }
+ }
+ if manifestDescDt := res.ExporterResponse[exptypes.ExporterImageDescriptorKey]; manifestDescDt != "" {
+ manifestDescDt, err := base64.StdEncoding.DecodeString(manifestDescDt)
+ if err != nil {
+ return nil, err
+ }
+ var manifestDesc ocispecs.Descriptor
+ if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil {
+ return nil, err
+ }
+ for _, storePath := range storesToUpdate {
+ tag := "latest"
+ if t, ok := res.ExporterResponse["image.name"]; ok {
+ tag = t
+ }
+ idx := ociindex.NewStoreIndex(storePath)
+ if err := idx.Put(tag, manifestDesc); err != nil {
return nil, err
}
}
@@ -332,7 +361,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
return res, nil
}
-func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
+func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) {
for _, d := range localDirs {
fi, err := os.Stat(d)
if err != nil {
@@ -342,16 +371,16 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
return nil, errors.Errorf("%s not a directory", d)
}
}
- resetUIDAndGID := func(p string, st *fstypes.Stat) bool {
+ resetUIDAndGID := func(p string, st *fstypes.Stat) fsutil.MapResult {
st.Uid = 0
st.Gid = 0
- return true
+ return fsutil.MapResultKeep
}
- dirs := make([]filesync.SyncedDir, 0, len(localDirs))
+ dirs := make(filesync.StaticDirSource, len(localDirs))
if def == nil {
for name, d := range localDirs {
- dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
+ dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
}
} else {
for _, dt := range def.Def {
@@ -366,7 +395,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
if !ok {
return nil, errors.Errorf("local directory %s not enabled", name)
}
- dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
+ dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
}
}
}
@@ -383,24 +412,20 @@ func defaultSessionName() string {
}
type cacheOptions struct {
- options controlapi.CacheOptions
- contentStores map[string]content.Store // key: ID of content store ("local:" + csDir)
- indicesToUpdate map[string]string // key: index.JSON file name, value: tag
- frontendAttrs map[string]string
+ options controlapi.CacheOptions
+ contentStores map[string]content.Store // key: ID of content store ("local:" + csDir)
+ storesToUpdate map[string]string // key: path to content store, value: tag
+ frontendAttrs map[string]string
}
func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cacheOptions, error) {
var (
cacheExports []*controlapi.CacheOptionsEntry
cacheImports []*controlapi.CacheOptionsEntry
- // legacy API is used for registry caches, because the daemon might not support the new API
- legacyExportRef string
- legacyImportRefs []string
)
contentStores := make(map[string]content.Store)
- indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag
+ storesToUpdate := make(map[string]string)
frontendAttrs := make(map[string]string)
- legacyExportAttrs := make(map[string]string)
for _, ex := range opt.CacheExports {
if ex.Type == "local" {
csDir := ex.Attrs["dest"]
@@ -415,26 +440,26 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
return nil, err
}
contentStores["local:"+csDir] = cs
+
+ tag := "latest"
+ if t, ok := ex.Attrs["tag"]; ok {
+ tag = t
+ }
// TODO(AkihiroSuda): support custom index JSON path and tag
- indexJSONPath := filepath.Join(csDir, "index.json")
- indicesToUpdate[indexJSONPath] = "latest"
- }
- if ex.Type == "registry" && legacyExportRef == "" {
- legacyExportRef = ex.Attrs["ref"]
- for k, v := range ex.Attrs {
- if k != "ref" {
- legacyExportAttrs[k] = v
- }
+ storesToUpdate[csDir] = tag
+ }
+ if ex.Type == "registry" {
+ regRef := ex.Attrs["ref"]
+ if regRef == "" {
+ return nil, errors.New("registry cache exporter requires ref")
}
- } else {
- cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
- Type: ex.Type,
- Attrs: ex.Attrs,
- })
}
+ cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
+ Type: ex.Type,
+ Attrs: ex.Attrs,
+ })
}
for _, im := range opt.CacheImports {
- attrs := im.Attrs
if im.Type == "local" {
csDir := im.Attrs["src"]
if csDir == "" {
@@ -445,41 +470,40 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
continue
}
- // if digest is not specified, load from "latest" tag
- if attrs["digest"] == "" {
- idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json"))
+ // if digest is not specified, attempt to load from tag
+ if im.Attrs["digest"] == "" {
+ tag := "latest"
+ if t, ok := im.Attrs["tag"]; ok {
+ tag = t
+ }
+
+ idx := ociindex.NewStoreIndex(csDir)
+ desc, err := idx.Get(tag)
if err != nil {
bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
continue
}
- for _, m := range idx.Manifests {
- if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) {
- attrs["digest"] = string(m.Digest)
- break
- }
- }
- if attrs["digest"] == "" {
- return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json")
+ if desc != nil {
+ im.Attrs["digest"] = desc.Digest.String()
}
}
+ if im.Attrs["digest"] == "" {
+ return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json")
+ }
contentStores["local:"+csDir] = cs
}
if im.Type == "registry" {
- legacyImportRef := attrs["ref"]
- legacyImportRefs = append(legacyImportRefs, legacyImportRef)
- } else {
- cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
- Type: im.Type,
- Attrs: attrs,
- })
+ regRef := im.Attrs["ref"]
+ if regRef == "" {
+ return nil, errors.New("registry cache importer requires ref")
+ }
}
+ cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
+ Type: im.Type,
+ Attrs: im.Attrs,
+ })
}
if opt.Frontend != "" || isGateway {
- // use legacy API for registry importers, because the frontend might not support the new API
- if len(legacyImportRefs) > 0 {
- frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")
- }
- // use new API for other importers
if len(cacheImports) > 0 {
s, err := json.Marshal(cacheImports)
if err != nil {
@@ -490,17 +514,12 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
}
res := cacheOptions{
options: controlapi.CacheOptions{
- // old API (for registry caches, planned to be removed in early 2019)
- ExportRefDeprecated: legacyExportRef,
- ExportAttrsDeprecated: legacyExportAttrs,
- ImportRefsDeprecated: legacyImportRefs,
- // new API
Exports: cacheExports,
Imports: cacheImports,
},
- contentStores: contentStores,
- indicesToUpdate: indicesToUpdate,
- frontendAttrs: frontendAttrs,
+ contentStores: contentStores,
+ storesToUpdate: storesToUpdate,
+ frontendAttrs: frontendAttrs,
}
return &res, nil
}
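
A sketch of the new `SolveOpt` fields, assuming a layout at `/path/to/oci-layout`; the key `"mystore"` is what `llb.OCILayout`/`--oci-layout` would reference, and pre-setting `Ref` lets the caller record or query the build ref later:

```go
package main

import (
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/identity"
)

func newSolveOpt() (client.SolveOpt, error) {
	cs, err := local.NewStore("/path/to/oci-layout")
	if err != nil {
		return client.SolveOpt{}, err
	}
	return client.SolveOpt{
		OCIStores: map[string]content.Store{"mystore": cs}, // attached to the session as "oci:mystore"
		Ref:       identity.NewID(),                        // pinned build ref instead of a generated one
	}, nil
}
```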
diff --git a/client/status.go b/client/status.go
new file mode 100644
index 000000000000..d692094af3fb
--- /dev/null
+++ b/client/status.go
@@ -0,0 +1,125 @@
+package client
+
+import (
+ controlapi "github.com/moby/buildkit/api/services/control"
+)
+
+var emptyLogVertexSize int
+
+func init() {
+ emptyLogVertex := controlapi.VertexLog{}
+ emptyLogVertexSize = emptyLogVertex.Size()
+}
+
+func NewSolveStatus(resp *controlapi.StatusResponse) *SolveStatus {
+ s := &SolveStatus{}
+ for _, v := range resp.Vertexes {
+ s.Vertexes = append(s.Vertexes, &Vertex{
+ Digest: v.Digest,
+ Inputs: v.Inputs,
+ Name: v.Name,
+ Started: v.Started,
+ Completed: v.Completed,
+ Error: v.Error,
+ Cached: v.Cached,
+ ProgressGroup: v.ProgressGroup,
+ })
+ }
+ for _, v := range resp.Statuses {
+ s.Statuses = append(s.Statuses, &VertexStatus{
+ ID: v.ID,
+ Vertex: v.Vertex,
+ Name: v.Name,
+ Total: v.Total,
+ Current: v.Current,
+ Timestamp: v.Timestamp,
+ Started: v.Started,
+ Completed: v.Completed,
+ })
+ }
+ for _, v := range resp.Logs {
+ s.Logs = append(s.Logs, &VertexLog{
+ Vertex: v.Vertex,
+ Stream: int(v.Stream),
+ Data: v.Msg,
+ Timestamp: v.Timestamp,
+ })
+ }
+ for _, v := range resp.Warnings {
+ s.Warnings = append(s.Warnings, &VertexWarning{
+ Vertex: v.Vertex,
+ Level: int(v.Level),
+ Short: v.Short,
+ Detail: v.Detail,
+ URL: v.Url,
+ SourceInfo: v.Info,
+ Range: v.Ranges,
+ })
+ }
+ return s
+}
+
+func (ss *SolveStatus) Marshal() (out []*controlapi.StatusResponse) {
+ logSize := 0
+ for {
+ retry := false
+ sr := controlapi.StatusResponse{}
+ for _, v := range ss.Vertexes {
+ sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{
+ Digest: v.Digest,
+ Inputs: v.Inputs,
+ Name: v.Name,
+ Started: v.Started,
+ Completed: v.Completed,
+ Error: v.Error,
+ Cached: v.Cached,
+ ProgressGroup: v.ProgressGroup,
+ })
+ }
+ for _, v := range ss.Statuses {
+ sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{
+ ID: v.ID,
+ Vertex: v.Vertex,
+ Name: v.Name,
+ Current: v.Current,
+ Total: v.Total,
+ Timestamp: v.Timestamp,
+ Started: v.Started,
+ Completed: v.Completed,
+ })
+ }
+ for i, v := range ss.Logs {
+ sr.Logs = append(sr.Logs, &controlapi.VertexLog{
+ Vertex: v.Vertex,
+ Stream: int64(v.Stream),
+ Msg: v.Data,
+ Timestamp: v.Timestamp,
+ })
+ logSize += len(v.Data) + emptyLogVertexSize
+ // avoid logs growing big and split apart if they do
+ if logSize > 1024*1024 {
+ ss.Vertexes = nil
+ ss.Statuses = nil
+ ss.Logs = ss.Logs[i+1:]
+ retry = true
+ break
+ }
+ }
+ for _, v := range ss.Warnings {
+ sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{
+ Vertex: v.Vertex,
+ Level: int64(v.Level),
+ Short: v.Short,
+ Detail: v.Detail,
+ Info: v.SourceInfo,
+ Ranges: v.Range,
+ Url: v.URL,
+ })
+ }
+ out = append(out, &sr)
+ if !retry {
+ break
+ }
+ }
+ return
+}
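
The two helpers are symmetric, which makes proxying status between APIs straightforward; a sketch (the channel wiring is illustrative):

```go
package main

import (
	controlapi "github.com/moby/buildkit/api/services/control"
	"github.com/moby/buildkit/client"
)

// forwardStatus converts each incoming API response to the client representation
// and back; Marshal splits the payload into several responses if accumulated
// log data grows past ~1MiB.
func forwardStatus(in <-chan *controlapi.StatusResponse, out chan<- *controlapi.StatusResponse) {
	for resp := range in {
		ss := client.NewSolveStatus(resp)
		for _, r := range ss.Marshal() {
			out <- r
		}
	}
}
```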
diff --git a/client/workers.go b/client/workers.go
index e5331cd608c4..b7f6f6725d90 100644
--- a/client/workers.go
+++ b/client/workers.go
@@ -13,10 +13,11 @@ import (
// WorkerInfo contains information about a worker
type WorkerInfo struct {
- ID string `json:"id"`
- Labels map[string]string `json:"labels"`
- Platforms []ocispecs.Platform `json:"platforms"`
- GCPolicy []PruneInfo `json:"gcPolicy"`
+ ID string `json:"id"`
+ Labels map[string]string `json:"labels"`
+ Platforms []ocispecs.Platform `json:"platforms"`
+ GCPolicy []PruneInfo `json:"gcPolicy"`
+ BuildkitVersion BuildkitVersion `json:"buildkitVersion"`
}
// ListWorkers lists all active workers
@@ -27,7 +28,7 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]
}
req := &controlapi.ListWorkersRequest{Filter: info.Filter}
- resp, err := c.controlClient().ListWorkers(ctx, req)
+ resp, err := c.ControlClient().ListWorkers(ctx, req)
if err != nil {
return nil, errors.Wrap(err, "failed to list workers")
}
@@ -36,10 +37,11 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]
for _, w := range resp.Record {
wi = append(wi, &WorkerInfo{
- ID: w.ID,
- Labels: w.Labels,
- Platforms: pb.ToSpecPlatforms(w.Platforms),
- GCPolicy: fromAPIGCPolicy(w.GCPolicy),
+ ID: w.ID,
+ Labels: w.Labels,
+ Platforms: pb.ToSpecPlatforms(w.Platforms),
+ GCPolicy: fromAPIGCPolicy(w.GCPolicy),
+ BuildkitVersion: fromAPIBuildkitVersion(w.BuildkitVersion),
})
}
diff --git a/cmd/buildctl/build.go b/cmd/buildctl/build.go
index f3d36015d0d3..e7cb1f770d3c 100644
--- a/cmd/buildctl/build.go
+++ b/cmd/buildctl/build.go
@@ -4,18 +4,24 @@ import (
"context"
"encoding/base64"
"encoding/json"
+ "fmt"
"io"
"os"
+ "strings"
"github.com/containerd/continuity"
+ "github.com/docker/cli/cli/config"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/cmd/buildctl/build"
bccommon "github.com/moby/buildkit/cmd/buildctl/common"
+ gateway "github.com/moby/buildkit/frontend/gateway/client"
+ "github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/session/sshforward/sshprovider"
"github.com/moby/buildkit/solver/pb"
+ spb "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/progress/progresswriter"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -38,16 +44,6 @@ var buildCommand = cli.Command{
Name: "output,o",
Usage: "Define exports for build result, e.g. --output type=image,name=docker.io/username/image,push=true",
},
- cli.StringFlag{
- Name: "exporter",
- Usage: "Define exporter for build result (DEPRECATED: use --export type=[,=]",
- Hidden: true,
- },
- cli.StringSliceFlag{
- Name: "exporter-opt",
- Usage: "Define custom options for exporter (DEPRECATED: use --output type=[,=]",
- Hidden: true,
- },
cli.StringFlag{
Name: "progress",
Usage: "Set type of progress (auto, plain, tty). Use plain to show container output",
@@ -61,6 +57,10 @@ var buildCommand = cli.Command{
Name: "local",
Usage: "Allow build access to the local directory",
},
+ cli.StringSliceFlag{
+ Name: "oci-layout",
+ Usage: "Allow build access to the local OCI layout",
+ },
cli.StringFlag{
Name: "frontend",
Usage: "Define frontend used for build",
@@ -69,11 +69,6 @@ var buildCommand = cli.Command{
Name: "opt",
Usage: "Define custom options for frontend, e.g. --opt target=foo --opt build-arg:foo=bar",
},
- cli.StringSliceFlag{
- Name: "frontend-opt",
- Usage: "Define custom options for frontend, e.g. --frontend-opt target=foo --frontend-opt build-arg:foo=bar (DEPRECATED: use --opt)",
- Hidden: true,
- },
cli.BoolFlag{
Name: "no-cache",
Usage: "Disable cache for all the vertices",
@@ -82,11 +77,6 @@ var buildCommand = cli.Command{
Name: "export-cache",
Usage: "Export build cache, e.g. --export-cache type=registry,ref=example.com/foo/bar, or --export-cache type=local,dest=path/to/dir",
},
- cli.StringSliceFlag{
- Name: "export-cache-opt",
- Usage: "Define custom options for cache exporting (DEPRECATED: use --export-cache type=,=[,=]",
- Hidden: true,
- },
cli.StringSliceFlag{
Name: "import-cache",
Usage: "Import build cache, e.g. --import-cache type=registry,ref=example.com/foo/bar, or --import-cache type=local,src=path/to/dir",
@@ -107,6 +97,14 @@ var buildCommand = cli.Command{
Name: "metadata-file",
Usage: "Output build metadata (e.g., image digest) to a file as JSON",
},
+ cli.StringFlag{
+ Name: "source-policy-file",
+ Usage: "Read source policy file from a JSON file",
+ },
+ cli.StringFlag{
+ Name: "ref-file",
+ Usage: "Write build ref to a file",
+ },
},
}
@@ -159,7 +157,8 @@ func buildAction(clicontext *cli.Context) error {
logrus.Infof("tracing logs to %s", traceFile.Name())
}
- attachable := []session.Attachable{authprovider.NewDockerAuthProvider(os.Stderr)}
+ dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
+ attachable := []session.Attachable{authprovider.NewDockerAuthProvider(dockerConfig)}
if ssh := clicontext.StringSlice("ssh"); len(ssh) > 0 {
configs, err := build.ParseSSH(ssh)
@@ -186,21 +185,12 @@ func buildAction(clicontext *cli.Context) error {
return err
}
- var exports []client.ExportEntry
- if legacyExporter := clicontext.String("exporter"); legacyExporter != "" {
- logrus.Warnf("--exporter is deprecated. Please use --output type=[,=] instead.")
- if len(clicontext.StringSlice("output")) > 0 {
- return errors.New("--exporter cannot be used with --output")
- }
- exports, err = build.ParseLegacyExporter(clicontext.String("exporter"), clicontext.StringSlice("exporter-opt"))
- } else {
- exports, err = build.ParseOutput(clicontext.StringSlice("output"))
- }
+ exports, err := build.ParseOutput(clicontext.StringSlice("output"))
if err != nil {
return err
}
- cacheExports, err := build.ParseExportCache(clicontext.StringSlice("export-cache"), clicontext.StringSlice("export-cache-opt"))
+ cacheExports, err := build.ParseExportCache(clicontext.StringSlice("export-cache"))
if err != nil {
return err
}
@@ -209,20 +199,38 @@ func buildAction(clicontext *cli.Context) error {
return err
}
+ var srcPol *spb.Policy
+ if srcPolFile := clicontext.String("source-policy-file"); srcPolFile != "" {
+ b, err := os.ReadFile(srcPolFile)
+ if err != nil {
+ return err
+ }
+ var srcPolStruct spb.Policy
+ if err := json.Unmarshal(b, &srcPolStruct); err != nil {
+ return errors.Wrapf(err, "failed to unmarshal source-policy-file %q", srcPolFile)
+ }
+ srcPol = &srcPolStruct
+ }
+
eg, ctx := errgroup.WithContext(bccommon.CommandContext(clicontext))
+ ref := identity.NewID()
+
solveOpt := client.SolveOpt{
Exports: exports,
// LocalDirs is set later
Frontend: clicontext.String("frontend"),
// FrontendAttrs is set later
+ // OCIStores is set later
CacheExports: cacheExports,
CacheImports: cacheImports,
Session: attachable,
AllowedEntitlements: allowed,
+ SourcePolicy: srcPol,
+ Ref: ref,
}
- solveOpt.FrontendAttrs, err = build.ParseOpt(clicontext.StringSlice("opt"), clicontext.StringSlice("frontend-opt"))
+ solveOpt.FrontendAttrs, err = build.ParseOpt(clicontext.StringSlice("opt"))
if err != nil {
return errors.Wrap(err, "invalid opt")
}
@@ -232,6 +240,11 @@ func buildAction(clicontext *cli.Context) error {
return errors.Wrap(err, "invalid local")
}
+ solveOpt.OCIStores, err = build.ParseOCILayout(clicontext.StringSlice("oci-layout"))
+ if err != nil {
+ return errors.Wrap(err, "invalid oci-layout")
+ }
+
var def *llb.Definition
if clicontext.String("frontend") == "" {
if fi, _ := os.Stdin.Stat(); (fi.Mode() & os.ModeCharDevice) != 0 {
@@ -250,6 +263,13 @@ func buildAction(clicontext *cli.Context) error {
}
}
+ refFile := clicontext.String("ref-file")
+ if refFile != "" {
+ defer func() {
+ continuity.AtomicWriteFile(refFile, []byte(ref), 0666)
+ }()
+ }
+
// not using shared context to not disrupt display but let it finish reporting errors
pw, err := progresswriter.NewPrinter(context.TODO(), os.Stderr, clicontext.String("progress"))
if err != nil {
@@ -283,13 +303,38 @@ func buildAction(clicontext *cli.Context) error {
}
}
+ var subMetadata map[string][]byte
+
eg.Go(func() error {
defer func() {
for _, w := range writers {
close(w.Status())
}
}()
- resp, err := c.Solve(ctx, def, solveOpt, progresswriter.ResetTime(mw.WithPrefix("", false)).Status())
+
+ sreq := gateway.SolveRequest{
+ Frontend: solveOpt.Frontend,
+ FrontendOpt: solveOpt.FrontendAttrs,
+ }
+ if def != nil {
+ sreq.Definition = def.ToPB()
+ }
+ resp, err := c.Build(ctx, solveOpt, "buildctl", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ _, isSubRequest := sreq.FrontendOpt["requestid"]
+ if isSubRequest {
+ if _, ok := sreq.FrontendOpt["frontend.caps"]; !ok {
+ sreq.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests"
+ }
+ }
+ res, err := c.Solve(ctx, sreq)
+ if err != nil {
+ return nil, err
+ }
+ if isSubRequest && res != nil {
+ subMetadata = res.Metadata
+ }
+ return res, err
+ }, progresswriter.ResetTime(mw.WithPrefix("", false)).Status())
if err != nil {
return err
}
@@ -312,7 +357,20 @@ func buildAction(clicontext *cli.Context) error {
return pw.Err()
})
- return eg.Wait()
+ if err := eg.Wait(); err != nil {
+ return err
+ }
+
+ if txt, ok := subMetadata["result.txt"]; ok {
+ fmt.Print(string(txt))
+ } else {
+ for k, v := range subMetadata {
+ if strings.HasPrefix(k, "result.") {
+ fmt.Printf("%s\n%s\n", k, v)
+ }
+ }
+ }
+ return nil
}
func writeMetadataFile(filename string, exporterResponse map[string]string) error {
diff --git a/cmd/buildctl/build/exportcache.go b/cmd/buildctl/build/exportcache.go
index cf83897b8f34..ec8b6a24bc4e 100644
--- a/cmd/buildctl/build/exportcache.go
+++ b/cmd/buildctl/build/exportcache.go
@@ -20,12 +20,11 @@ func parseExportCacheCSV(s string) (client.CacheOptionsEntry, error) {
return ex, err
}
for _, field := range fields {
- parts := strings.SplitN(field, "=", 2)
- if len(parts) != 2 {
+ key, value, ok := strings.Cut(field, "=")
+ if !ok {
return ex, errors.Errorf("invalid value %s", field)
}
- key := strings.ToLower(parts[0])
- value := parts[1]
+ key = strings.ToLower(key)
switch key {
case "type":
ex.Type = value
@@ -39,37 +38,28 @@ func parseExportCacheCSV(s string) (client.CacheOptionsEntry, error) {
if _, ok := ex.Attrs["mode"]; !ok {
ex.Attrs["mode"] = "min"
}
+ if ex.Type == "gha" {
+ return loadGithubEnv(ex)
+ }
return ex, nil
}
-// ParseExportCache parses --export-cache (and legacy --export-cache-opt)
-func ParseExportCache(exportCaches, legacyExportCacheOpts []string) ([]client.CacheOptionsEntry, error) {
+// ParseExportCache parses --export-cache
+func ParseExportCache(exportCaches []string) ([]client.CacheOptionsEntry, error) {
var exports []client.CacheOptionsEntry
- if len(legacyExportCacheOpts) > 0 {
- if len(exportCaches) != 1 {
- return nil, errors.New("--export-cache-opt requires exactly single --export-cache")
- }
- }
for _, exportCache := range exportCaches {
legacy := !strings.Contains(exportCache, "type=")
if legacy {
- logrus.Warnf("--export-cache [ --export-cache-opt ]= is deprecated. Please use --export-cache type=registry,ref=[,]=[,=] instead")
- attrs, err := attrMap(legacyExportCacheOpts)
- if err != nil {
- return nil, err
- }
- if _, ok := attrs["mode"]; !ok {
- attrs["mode"] = "min"
- }
- attrs["ref"] = exportCache
+ // Deprecated since BuildKit v0.4.0, but no plan to remove: https://github.com/moby/buildkit/pull/2783#issuecomment-1093449772
+ logrus.Warnf("--export-cache [ is deprecated. Please use --export-cache type=registry,ref=][,]=[,=] instead")
exports = append(exports, client.CacheOptionsEntry{
- Type: "registry",
- Attrs: attrs,
+ Type: "registry",
+ Attrs: map[string]string{
+ "mode": "min",
+ "ref": exportCache,
+ },
})
} else {
- if len(legacyExportCacheOpts) > 0 {
- return nil, errors.New("--export-cache-opt is not supported for the specified --export-cache. Please use --export-cache type=,=[,=] instead")
- }
ex, err := parseExportCacheCSV(exportCache)
if err != nil {
return nil, err
diff --git a/cmd/buildctl/build/exportcache_test.go b/cmd/buildctl/build/exportcache_test.go
index 77a3b8aa255c..ccaa321735d9 100644
--- a/cmd/buildctl/build/exportcache_test.go
+++ b/cmd/buildctl/build/exportcache_test.go
@@ -9,10 +9,9 @@ import (
func TestParseExportCache(t *testing.T) {
type testCase struct {
- exportCaches []string // --export-cache
- legacyExportCacheOpts []string // --export-cache-opt (legacy)
- expected []client.CacheOptionsEntry
- expectedErr string
+ exportCaches []string // --export-cache
+ expected []client.CacheOptionsEntry
+ expectedErr string
}
testCases := []testCase{
{
@@ -28,28 +27,22 @@ func TestParseExportCache(t *testing.T) {
},
},
{
- exportCaches: []string{"example.com/foo/bar"},
- legacyExportCacheOpts: []string{"mode=max"},
+ exportCaches: []string{"example.com/foo/bar"},
expected: []client.CacheOptionsEntry{
{
Type: "registry",
Attrs: map[string]string{
"ref": "example.com/foo/bar",
- "mode": "max",
+ "mode": "min",
},
},
},
},
- {
- exportCaches: []string{"type=registry,ref=example.com/foo/bar"},
- legacyExportCacheOpts: []string{"mode=max"},
- expectedErr: "--export-cache-opt is not supported for the specified --export-cache",
- },
// TODO: test multiple exportCaches (valid for CLI but not supported by solver)
}
for _, tc := range testCases {
- ex, err := ParseExportCache(tc.exportCaches, tc.legacyExportCacheOpts)
+ ex, err := ParseExportCache(tc.exportCaches)
if tc.expectedErr == "" {
require.EqualValues(t, tc.expected, ex)
} else {
diff --git a/cmd/buildctl/build/importcache.go b/cmd/buildctl/build/importcache.go
index a300f327bbae..b91eb36490c9 100644
--- a/cmd/buildctl/build/importcache.go
+++ b/cmd/buildctl/build/importcache.go
@@ -20,12 +20,11 @@ func parseImportCacheCSV(s string) (client.CacheOptionsEntry, error) {
return im, err
}
for _, field := range fields {
- parts := strings.SplitN(field, "=", 2)
- if len(parts) != 2 {
+ key, value, ok := strings.Cut(field, "=")
+ if !ok {
return im, errors.Errorf("invalid value %s", field)
}
- key := strings.ToLower(parts[0])
- value := parts[1]
+ key = strings.ToLower(key)
switch key {
case "type":
im.Type = value
@@ -36,6 +35,9 @@ func parseImportCacheCSV(s string) (client.CacheOptionsEntry, error) {
if im.Type == "" {
return im, errors.New("--import-cache requires type=")
}
+ if im.Type == "gha" {
+ return loadGithubEnv(im)
+ }
return im, nil
}
@@ -45,6 +47,7 @@ func ParseImportCache(importCaches []string) ([]client.CacheOptionsEntry, error)
for _, importCache := range importCaches {
legacy := !strings.Contains(importCache, "type=")
if legacy {
+ // Deprecated since BuildKit v0.4.0, but no plan to remove: https://github.com/moby/buildkit/pull/2783#issuecomment-1093449772
logrus.Warn("--import-cache [ is deprecated. Please use --import-cache type=registry,ref=][,]=[,=] instead.")
imports = append(imports, client.CacheOptionsEntry{
Type: "registry",
diff --git a/cmd/buildctl/build/importcache_test.go b/cmd/buildctl/build/importcache_test.go
index 8dd18bcf16ab..ad175812d65e 100644
--- a/cmd/buildctl/build/importcache_test.go
+++ b/cmd/buildctl/build/importcache_test.go
@@ -48,7 +48,36 @@ func TestParseImportCache(t *testing.T) {
},
},
},
+ {
+ importCaches: []string{"type=gha,url=https://foo.bar,token=foo"},
+ expected: []client.CacheOptionsEntry{
+ {
+ Type: "gha",
+ Attrs: map[string]string{
+ "url": "https://foo.bar",
+ "token": "foo",
+ },
+ },
+ },
+ },
+ {
+ importCaches: []string{"type=gha"},
+ expected: []client.CacheOptionsEntry{
+ {
+ Type: "gha",
+ Attrs: map[string]string{
+ "url": "https://github.com/test", // Set from env below
+ "token": "bar", // Set from env below
+ },
+ },
+ },
+ },
}
+
+ // Set values for GitHub parse cache
+ t.Setenv("ACTIONS_CACHE_URL", "https://github.com/test")
+ t.Setenv("ACTIONS_RUNTIME_TOKEN", "bar")
+
for _, tc := range testCases {
im, err := ParseImportCache(tc.importCaches)
if tc.expectedErr == "" {
diff --git a/cmd/buildctl/build/ocilayout.go b/cmd/buildctl/build/ocilayout.go
new file mode 100644
index 000000000000..e611f5799691
--- /dev/null
+++ b/cmd/buildctl/build/ocilayout.go
@@ -0,0 +1,27 @@
+package build
+
+import (
+ "strings"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/content/local"
+ "github.com/pkg/errors"
+)
+
+// ParseOCILayout parses --oci-layout
+func ParseOCILayout(layouts []string) (map[string]content.Store, error) {
+ contentStores := make(map[string]content.Store)
+ for _, idAndDir := range layouts {
+ parts := strings.SplitN(idAndDir, "=", 2)
+ if len(parts) != 2 {
+ return nil, errors.Errorf("oci-layout option must be 'id=path/to/layout', instead had invalid %s", idAndDir)
+ }
+ cs, err := local.NewStore(parts[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "oci-layout context at %s failed to initialize", parts[1])
+ }
+ contentStores[parts[0]] = cs
+ }
+
+ return contentStores, nil
+}
diff --git a/cmd/buildctl/build/opt.go b/cmd/buildctl/build/opt.go
index 93acc7dd23ac..3731b130b30d 100644
--- a/cmd/buildctl/build/opt.go
+++ b/cmd/buildctl/build/opt.go
@@ -1,27 +1,5 @@
package build
-import (
- "github.com/sirupsen/logrus"
-)
-
-func ParseOpt(opts, legacyFrontendOpts []string) (map[string]string, error) {
- m := make(map[string]string)
- if len(legacyFrontendOpts) > 0 {
- logrus.Warn("--frontend-opt = is deprecated. Please use --opt = instead.")
- legacy, err := attrMap(legacyFrontendOpts)
- if err != nil {
- return nil, err
- }
- for k, v := range legacy {
- m[k] = v
- }
- }
- modern, err := attrMap(opts)
- if err != nil {
- return nil, err
- }
- for k, v := range modern {
- m[k] = v
- }
- return m, nil
+func ParseOpt(opts []string) (map[string]string, error) {
+ return attrMap(opts)
}
diff --git a/cmd/buildctl/build/output.go b/cmd/buildctl/build/output.go
index 185266e1e1d5..abdd508b833f 100644
--- a/cmd/buildctl/build/output.go
+++ b/cmd/buildctl/build/output.go
@@ -4,6 +4,7 @@ import (
"encoding/csv"
"io"
"os"
+ "strconv"
"strings"
"github.com/containerd/console"
@@ -23,12 +24,11 @@ func parseOutputCSV(s string) (client.ExportEntry, error) {
return ex, err
}
for _, field := range fields {
- parts := strings.SplitN(field, "=", 2)
- if len(parts) != 2 {
+ key, value, ok := strings.Cut(field, "=")
+ if !ok {
return ex, errors.Errorf("invalid value %s", field)
}
- key := strings.ToLower(parts[0])
- value := parts[1]
+ key = strings.ToLower(key)
switch key {
case "type":
ex.Type = value
@@ -42,7 +42,7 @@ func parseOutputCSV(s string) (client.ExportEntry, error) {
if v, ok := ex.Attrs["output"]; ok {
return ex, errors.Errorf("output=%s not supported for --output, you meant dest=%s?", v, v)
}
- ex.Output, ex.OutputDir, err = resolveExporterDest(ex.Type, ex.Attrs["dest"])
+ ex.Output, ex.OutputDir, err = resolveExporterDest(ex.Type, ex.Attrs["dest"], ex.Attrs)
if err != nil {
return ex, errors.Wrap(err, "invalid output option: output")
}
@@ -65,42 +65,36 @@ func ParseOutput(exports []string) ([]client.ExportEntry, error) {
return entries, nil
}
-// ParseLegacyExporter parses legacy --exporter --exporter-opt =
-func ParseLegacyExporter(legacyExporter string, legacyExporterOpts []string) ([]client.ExportEntry, error) {
- var ex client.ExportEntry
- ex.Type = legacyExporter
- var err error
- ex.Attrs, err = attrMap(legacyExporterOpts)
- if err != nil {
- return nil, errors.Wrap(err, "invalid exporter-opt")
- }
- if v, ok := ex.Attrs["dest"]; ok {
- return nil, errors.Errorf("dest=%s not supported for --exporter-opt, you meant output=%s?", v, v)
- }
- ex.Output, ex.OutputDir, err = resolveExporterDest(ex.Type, ex.Attrs["output"])
- if err != nil {
- return nil, errors.Wrap(err, "invalid exporter option: output")
- }
- if ex.Output != nil || ex.OutputDir != "" {
- delete(ex.Attrs, "output")
- }
- return []client.ExportEntry{ex}, nil
-}
-
// resolveExporterDest returns at most either one of io.WriteCloser (single file) or a string (directory path).
-func resolveExporterDest(exporter, dest string) (func(map[string]string) (io.WriteCloser, error), string, error) {
+func resolveExporterDest(exporter, dest string, attrs map[string]string) (func(map[string]string) (io.WriteCloser, error), string, error) {
wrapWriter := func(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) {
return func(m map[string]string) (io.WriteCloser, error) {
return wc, nil
}
}
+
+ var supportFile bool
+ var supportDir bool
switch exporter {
case client.ExporterLocal:
+ supportDir = true
+ case client.ExporterTar:
+ supportFile = true
+ case client.ExporterOCI, client.ExporterDocker:
+ tar, err := strconv.ParseBool(attrs["tar"])
+ if err != nil {
+ tar = true
+ }
+ supportFile = tar
+ supportDir = !tar
+ }
+
+ if supportDir {
if dest == "" {
- return nil, "", errors.New("output directory is required for local exporter")
+ return nil, "", errors.Errorf("output directory is required for %s exporter", exporter)
}
return nil, dest, nil
- case client.ExporterOCI, client.ExporterDocker, client.ExporterTar:
+ } else if supportFile {
if dest != "" && dest != "-" {
fi, err := os.Stat(dest)
if err != nil && !errors.Is(err, os.ErrNotExist) {
@@ -117,7 +111,8 @@ func resolveExporterDest(exporter, dest string) (func(map[string]string) (io.Wri
return nil, "", errors.Errorf("output file is required for %s exporter. refusing to write to console", exporter)
}
return wrapWriter(os.Stdout), "", nil
- default: // e.g. client.ExporterImage
+ } else {
+ // e.g. client.ExporterImage
if dest != "" {
return nil, "", errors.Errorf("output %s is not supported by %s exporter", dest, exporter)
}
diff --git a/cmd/buildctl/build/secret.go b/cmd/buildctl/build/secret.go
index 7d91a439073d..a41892b10e30 100644
--- a/cmd/buildctl/build/secret.go
+++ b/cmd/buildctl/build/secret.go
@@ -26,8 +26,8 @@ func ParseSecret(sl []string) (session.Attachable, error) {
return secretsprovider.NewSecretProvider(store), nil
}
-func parseSecret(value string) (*secretsprovider.Source, error) {
- csvReader := csv.NewReader(strings.NewReader(value))
+func parseSecret(val string) (*secretsprovider.Source, error) {
+ csvReader := csv.NewReader(strings.NewReader(val))
fields, err := csvReader.Read()
if err != nil {
return nil, errors.Wrap(err, "failed to parse csv secret")
@@ -37,14 +37,11 @@ func parseSecret(value string) (*secretsprovider.Source, error) {
var typ string
for _, field := range fields {
- parts := strings.SplitN(field, "=", 2)
- key := strings.ToLower(parts[0])
-
- if len(parts) != 2 {
+ key, value, ok := strings.Cut(field, "=")
+ if !ok {
return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
}
-
- value := parts[1]
+ key = strings.ToLower(key)
switch key {
case "type":
if value != "file" && value != "env" {
diff --git a/cmd/buildctl/build/util.go b/cmd/buildctl/build/util.go
new file mode 100644
index 000000000000..4dfe63289791
--- /dev/null
+++ b/cmd/buildctl/build/util.go
@@ -0,0 +1,33 @@
+package build
+
+import (
+ "os"
+
+ "github.com/pkg/errors"
+
+ "github.com/moby/buildkit/client"
+)
+
+// loadGithubEnv verifies that the url and token attributes exist in the
+// cache options. If they are missing, it looks up the $ACTIONS_CACHE_URL
+// and $ACTIONS_RUNTIME_TOKEN environment variables and adds them to the
+// cache options. This works for both cache import and export.
+func loadGithubEnv(cache client.CacheOptionsEntry) (client.CacheOptionsEntry, error) {
+ if _, ok := cache.Attrs["url"]; !ok {
+ url, ok := os.LookupEnv("ACTIONS_CACHE_URL")
+ if !ok {
+ return cache, errors.New("cache with type gha requires url parameter or $ACTIONS_CACHE_URL")
+ }
+ cache.Attrs["url"] = url
+ }
+
+ if _, ok := cache.Attrs["token"]; !ok {
+ token, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN")
+ if !ok {
+ return cache, errors.New("cache with type gha requires token parameter or $ACTIONS_RUNTIME_TOKEN")
+ }
+ cache.Attrs["token"] = token
+ }
+ return cache, nil
+}
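
For context, the fallback implemented by `loadGithubEnv` can be exercised on its own. The sketch below (illustrative names, not part of the patch) reproduces the same lookup order using only the standard library:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// fillGHAAttrs is an illustrative re-implementation of the fallback above:
// url and token come from the cache options if present, otherwise from the
// environment variables that GitHub Actions exposes to its runners.
func fillGHAAttrs(attrs map[string]string) error {
	if _, ok := attrs["url"]; !ok {
		url, ok := os.LookupEnv("ACTIONS_CACHE_URL")
		if !ok {
			return errors.New("cache with type gha requires url parameter or $ACTIONS_CACHE_URL")
		}
		attrs["url"] = url
	}
	if _, ok := attrs["token"]; !ok {
		token, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN")
		if !ok {
			return errors.New("cache with type gha requires token parameter or $ACTIONS_RUNTIME_TOKEN")
		}
		attrs["token"] = token
	}
	return nil
}

func main() {
	attrs := map[string]string{}
	if err := fillGHAAttrs(attrs); err != nil {
		fmt.Println(err) // expected outside of a GitHub Actions runner
		return
	}
	fmt.Println("url and token resolved from the environment")
}
```
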
diff --git a/cmd/buildctl/build_test.go b/cmd/buildctl/build_test.go
index bde880c9095c..8fde945219e8 100644
--- a/cmd/buildctl/build_test.go
+++ b/cmd/buildctl/build_test.go
@@ -6,7 +6,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -25,10 +24,10 @@ import (
func testBuildWithLocalFiles(t *testing.T, sb integration.Sandbox) {
dir, err := tmpdir(
+ t,
fstest.CreateFile("foo", []byte("bar"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
st := llb.Image("busybox").
Run(llb.Shlex("sh -c 'echo -n bar > foo2'")).
@@ -55,17 +54,15 @@ func testBuildLocalExporter(t *testing.T, sb integration.Sandbox) {
rdr, err := marshal(sb.Context(), out)
require.NoError(t, err)
- tmpdir, err := ioutil.TempDir("", "buildkit-buildctl")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
- cmd := sb.Cmd(fmt.Sprintf("build --progress=plain --exporter=local --exporter-opt output=%s", tmpdir))
+ cmd := sb.Cmd(fmt.Sprintf("build --progress=plain --output type=local,dest=%s", tmpdir))
cmd.Stdin = rdr
err = cmd.Run()
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(tmpdir, "foo"))
require.NoError(t, err)
require.Equal(t, string(dt), "bar")
}
@@ -86,8 +83,7 @@ func testBuildContainerdExporter(t *testing.T, sb integration.Sandbox) {
buildCmd := []string{
"build", "--progress=plain",
- "--exporter=image", "--exporter-opt", "unpack=true",
- "--exporter-opt", "name=" + imageName,
+ "--output", "type=image,unpack=true,name=" + imageName,
}
cmd := sb.Cmd(strings.Join(buildCmd, " "))
@@ -121,9 +117,7 @@ func testBuildMetadataFile(t *testing.T, sb integration.Sandbox) {
rdr, err := marshal(sb.Context(), st.Root())
require.NoError(t, err)
- tmpDir, err := ioutil.TempDir("", "buildkit-buildctl")
- require.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ tmpDir := t.TempDir()
imageName := "example.com/moby/metadata:test"
metadataFile := filepath.Join(tmpDir, "metadata.json")
@@ -140,7 +134,7 @@ func testBuildMetadataFile(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
require.FileExists(t, metadataFile)
- metadataBytes, err := ioutil.ReadFile(metadataFile)
+ metadataBytes, err := os.ReadFile(metadataFile)
require.NoError(t, err)
var metadata map[string]interface{}
@@ -192,11 +186,8 @@ func marshal(ctx context.Context, st llb.State) (io.Reader, error) {
return bytes.NewBuffer(dt), nil
}
-func tmpdir(appliers ...fstest.Applier) (string, error) {
- tmpdir, err := ioutil.TempDir("", "buildkit-buildctl")
- if err != nil {
- return "", err
- }
+func tmpdir(t *testing.T, appliers ...fstest.Applier) (string, error) {
+ tmpdir := t.TempDir()
if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil {
return "", err
}
diff --git a/cmd/buildctl/buildctl_test.go b/cmd/buildctl/buildctl_test.go
index 66fef818f919..feda648090c3 100644
--- a/cmd/buildctl/buildctl_test.go
+++ b/cmd/buildctl/buildctl_test.go
@@ -2,7 +2,6 @@ package main
import (
"encoding/json"
- "io/ioutil"
"os"
"path"
"testing"
@@ -37,9 +36,7 @@ func testUsage(t *testing.T, sb integration.Sandbox) {
}
func TestWriteMetadataFile(t *testing.T) {
- tmpdir, err := os.MkdirTemp("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(tmpdir)
+ tmpdir := t.TempDir()
cases := []struct {
name string
@@ -120,7 +117,7 @@ func TestWriteMetadataFile(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
fname := path.Join(tmpdir, "metadata_"+tt.name)
require.NoError(t, writeMetadataFile(fname, tt.exporterResponse))
- current, err := ioutil.ReadFile(fname)
+ current, err := os.ReadFile(fname)
require.NoError(t, err)
var raw map[string]interface{}
require.NoError(t, json.Unmarshal(current, &raw))
diff --git a/cmd/buildctl/common/common.go b/cmd/buildctl/common/common.go
index e1013160a41e..3a691dc0bac7 100644
--- a/cmd/buildctl/common/common.go
+++ b/cmd/buildctl/common/common.go
@@ -1,10 +1,14 @@
package common
import (
+ "bytes"
"context"
+ "encoding/json"
"net/url"
"os"
"path/filepath"
+ "strings"
+ "text/template"
"time"
"github.com/moby/buildkit/client"
@@ -88,3 +92,25 @@ func ResolveClient(c *cli.Context) (*client.Client, error) {
return client.New(ctx, c.GlobalString("addr"), opts...)
}
+
+func ParseTemplate(format string) (*template.Template, error) {
+ // aliases is from https://github.com/containerd/nerdctl/blob/v0.17.1/cmd/nerdctl/fmtutil.go#L116-L126 (Apache License 2.0)
+ aliases := map[string]string{
+ "json": "{{json .}}",
+ }
+ if alias, ok := aliases[format]; ok {
+ format = alias
+ }
+ // funcs is from https://github.com/docker/cli/blob/v20.10.12/templates/templates.go#L12-L20 (Apache License 2.0)
+ funcs := template.FuncMap{
+ "json": func(v interface{}) string {
+ buf := &bytes.Buffer{}
+ enc := json.NewEncoder(buf)
+ enc.SetEscapeHTML(false)
+ enc.Encode(v)
+ // Remove the trailing new line added by the encoder
+ return strings.TrimSpace(buf.String())
+ },
+ }
+ return template.New("").Funcs(funcs).Parse(format)
+}
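
`ParseTemplate` is now shared by the `--format` flags touched elsewhere in this patch. A self-contained sketch of how the `json` alias and template function behave (a trimmed copy for illustration only):

```go
package main

import (
	"bytes"
	"encoding/json"
	"os"
	"strings"
	"text/template"
)

// parseTemplate is a trimmed copy of the shared helper: "json" is an alias
// for "{{json .}}", and a json template function renders any value without
// HTML escaping.
func parseTemplate(format string) (*template.Template, error) {
	if format == "json" {
		format = "{{json .}}"
	}
	funcs := template.FuncMap{
		"json": func(v interface{}) string {
			buf := &bytes.Buffer{}
			enc := json.NewEncoder(buf)
			enc.SetEscapeHTML(false)
			enc.Encode(v)
			return strings.TrimSpace(buf.String())
		},
	}
	return template.New("").Funcs(funcs).Parse(format)
}

func main() {
	tmpl, err := parseTemplate("json")
	if err != nil {
		panic(err)
	}
	// A small map stands in for the worker/info/du records buildctl passes in.
	if err := tmpl.Execute(os.Stdout, map[string]string{"status": "ok"}); err != nil {
		panic(err)
	}
}
```

The `--format` flags added to `debug info`, `du`, and `prune` in this patch, as well as the existing `debug workers` one, all route through this helper.
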
diff --git a/cmd/buildctl/debug.go b/cmd/buildctl/debug.go
index a6a33b6f8882..d7d735ef72d5 100644
--- a/cmd/buildctl/debug.go
+++ b/cmd/buildctl/debug.go
@@ -12,5 +12,10 @@ var debugCommand = cli.Command{
debug.DumpLLBCommand,
debug.DumpMetadataCommand,
debug.WorkersCommand,
+ debug.InfoCommand,
+ debug.MonitorCommand,
+ debug.LogsCommand,
+ debug.CtlCommand,
+ debug.GetCommand,
},
}
diff --git a/cmd/buildctl/debug/ctl.go b/cmd/buildctl/debug/ctl.go
new file mode 100644
index 000000000000..2ebbe6ca92ec
--- /dev/null
+++ b/cmd/buildctl/debug/ctl.go
@@ -0,0 +1,67 @@
+package debug
+
+import (
+ controlapi "github.com/moby/buildkit/api/services/control"
+ bccommon "github.com/moby/buildkit/cmd/buildctl/common"
+ "github.com/moby/buildkit/util/appcontext"
+ "github.com/pkg/errors"
+ "github.com/urfave/cli"
+)
+
+var CtlCommand = cli.Command{
+ Name: "ctl",
+ Usage: "control build records",
+ Action: ctl,
+ Flags: []cli.Flag{
+ cli.BoolFlag{
+ Name: "pin",
+ Usage: "Pin build so it will not be garbage collected",
+ },
+ cli.BoolFlag{
+ Name: "unpin",
+ Usage: "Unpin build so it will be garbage collected",
+ },
+ cli.BoolFlag{
+ Name: "delete",
+ Usage: "Delete build record",
+ },
+ },
+}
+
+func ctl(clicontext *cli.Context) error {
+ args := clicontext.Args()
+ if len(args) == 0 {
+ return errors.Errorf("build ref must be specified")
+ }
+ ref := args[0]
+
+ c, err := bccommon.ResolveClient(clicontext)
+ if err != nil {
+ return err
+ }
+
+ ctx := appcontext.Context()
+
+ pin := clicontext.Bool("pin")
+ unpin := clicontext.Bool("unpin")
+ del := clicontext.Bool("delete")
+
+ if !pin && !unpin && !del {
+ return errors.Errorf("must specify one of --pin, --unpin, --delete")
+ }
+
+ if pin && unpin {
+ return errors.Errorf("cannot specify both --pin and --unpin")
+ }
+
+ if del && (pin || unpin) {
+ return errors.Errorf("cannot specify --delete with --pin or --unpin")
+ }
+
+ _, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
+ Ref: ref,
+ Pinned: pin,
+ Delete: del,
+ })
+ return err
+}
diff --git a/cmd/buildctl/debug/dumpmetadata.go b/cmd/buildctl/debug/dumpmetadata.go
index 3b7e948b3990..b53fc25fd578 100644
--- a/cmd/buildctl/debug/dumpmetadata.go
+++ b/cmd/buildctl/debug/dumpmetadata.go
@@ -2,7 +2,6 @@ package debug
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"time"
@@ -41,7 +40,7 @@ var DumpMetadataCommand = cli.Command{
}
func findMetadataDBFiles(root string) ([]string, error) {
- dirs, err := ioutil.ReadDir(root)
+ dirs, err := os.ReadDir(root)
if err != nil {
return nil, err
}
diff --git a/cmd/buildctl/debug/get.go b/cmd/buildctl/debug/get.go
new file mode 100644
index 000000000000..e19ee275f346
--- /dev/null
+++ b/cmd/buildctl/debug/get.go
@@ -0,0 +1,54 @@
+package debug
+
+import (
+ "io"
+ "os"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/content/proxy"
+ bccommon "github.com/moby/buildkit/cmd/buildctl/common"
+ "github.com/moby/buildkit/util/appcontext"
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/urfave/cli"
+)
+
+var GetCommand = cli.Command{
+ Name: "get",
+ Usage: "retrieve a blob from contentstore",
+ Action: get,
+}
+
+func get(clicontext *cli.Context) error {
+ args := clicontext.Args()
+ if len(args) == 0 {
+ return errors.Errorf("blob digest must be specified")
+ }
+
+ dgst, err := digest.Parse(args[0])
+ if err != nil {
+ return err
+ }
+
+ c, err := bccommon.ResolveClient(clicontext)
+ if err != nil {
+ return err
+ }
+
+ ctx := appcontext.Context()
+
+ store := proxy.NewContentStore(c.ContentClient())
+ ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{
+ Digest: dgst,
+ })
+ if err != nil {
+ return err
+ }
+ defer ra.Close()
+
+ // use 1MB buffer like we do for ingesting
+ buf := make([]byte, 1<<20)
+ _, err = io.CopyBuffer(os.Stdout, content.NewReader(ra), buf)
+ return err
+}
diff --git a/cmd/buildctl/debug/info.go b/cmd/buildctl/debug/info.go
new file mode 100644
index 000000000000..3f702ff3aa0b
--- /dev/null
+++ b/cmd/buildctl/debug/info.go
@@ -0,0 +1,48 @@
+package debug
+
+import (
+ "fmt"
+ "os"
+ "text/tabwriter"
+
+ bccommon "github.com/moby/buildkit/cmd/buildctl/common"
+ "github.com/urfave/cli"
+)
+
+var InfoCommand = cli.Command{
+ Name: "info",
+ Usage: "display internal information",
+ Action: info,
+ Flags: []cli.Flag{
+ cli.StringFlag{
+ Name: "format",
+			Usage: "Format the output using the given Go template, e.g., '{{json .}}'",
+ },
+ },
+}
+
+func info(clicontext *cli.Context) error {
+ c, err := bccommon.ResolveClient(clicontext)
+ if err != nil {
+ return err
+ }
+ res, err := c.Info(bccommon.CommandContext(clicontext))
+ if err != nil {
+ return err
+ }
+ if format := clicontext.String("format"); format != "" {
+ tmpl, err := bccommon.ParseTemplate(format)
+ if err != nil {
+ return err
+ }
+ if err := tmpl.Execute(clicontext.App.Writer, res); err != nil {
+ return err
+ }
+ _, err = fmt.Fprintf(clicontext.App.Writer, "\n")
+ return err
+ }
+
+ w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
+ _, _ = fmt.Fprintf(w, "BuildKit:\t%s %s %s\n", res.BuildkitVersion.Package, res.BuildkitVersion.Version, res.BuildkitVersion.Revision)
+ return w.Flush()
+}
diff --git a/cmd/buildctl/debug/logs.go b/cmd/buildctl/debug/logs.go
new file mode 100644
index 000000000000..61693f4df141
--- /dev/null
+++ b/cmd/buildctl/debug/logs.go
@@ -0,0 +1,111 @@
+package debug
+
+import (
+ "io"
+ "os"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/content/proxy"
+ controlapi "github.com/moby/buildkit/api/services/control"
+ "github.com/moby/buildkit/client"
+ bccommon "github.com/moby/buildkit/cmd/buildctl/common"
+ "github.com/moby/buildkit/util/appcontext"
+ "github.com/moby/buildkit/util/progress/progresswriter"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/urfave/cli"
+)
+
+var LogsCommand = cli.Command{
+ Name: "logs",
+ Usage: "display build logs",
+ Action: logs,
+ Flags: []cli.Flag{
+ cli.StringFlag{
+ Name: "progress",
+ Usage: "progress output type",
+ Value: "auto",
+ },
+ cli.BoolFlag{
+ Name: "trace",
+ Usage: "show opentelemetry trace",
+ },
+ },
+}
+
+func logs(clicontext *cli.Context) error {
+ args := clicontext.Args()
+ if len(args) == 0 {
+ return errors.Errorf("build ref must be specified")
+ }
+ ref := args[0]
+
+ c, err := bccommon.ResolveClient(clicontext)
+ if err != nil {
+ return err
+ }
+
+ ctx := appcontext.Context()
+
+ if clicontext.Bool("trace") {
+ cl, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
+ Ref: ref,
+ })
+ if err != nil {
+ return err
+ }
+ he, err := cl.Recv()
+ if err != nil {
+ if err == io.EOF {
+ return errors.Errorf("ref %s not found", ref)
+ }
+ return err
+ }
+ if he.Record.Trace == nil {
+ return errors.Errorf("ref %s does not have trace", ref)
+ }
+ store := proxy.NewContentStore(c.ContentClient())
+ ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{
+ Digest: he.Record.Trace.Digest,
+ Size: he.Record.Trace.Size_,
+ MediaType: he.Record.Trace.MediaType,
+ })
+ if err != nil {
+ return err
+ }
+ defer ra.Close()
+
+ // use 1MB buffer like we do for ingesting
+ buf := make([]byte, 1<<20)
+ _, err = io.CopyBuffer(os.Stdout, content.NewReader(ra), buf)
+ return err
+ }
+
+ cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
+ Ref: ref,
+ })
+ if err != nil {
+ return err
+ }
+
+ pw, err := progresswriter.NewPrinter(ctx, os.Stdout, clicontext.String("progress"))
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ <-pw.Done()
+ }()
+
+ for {
+ resp, err := cl.Recv()
+ if err != nil {
+ close(pw.Status())
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ return err
+ }
+ pw.Status() <- client.NewSolveStatus(resp)
+ }
+}
diff --git a/cmd/buildctl/debug/monitor.go b/cmd/buildctl/debug/monitor.go
new file mode 100644
index 000000000000..2cf83e9326bf
--- /dev/null
+++ b/cmd/buildctl/debug/monitor.go
@@ -0,0 +1,78 @@
+package debug
+
+import (
+ "fmt"
+
+ controlapi "github.com/moby/buildkit/api/services/control"
+ bccommon "github.com/moby/buildkit/cmd/buildctl/common"
+ "github.com/moby/buildkit/util/appcontext"
+ "github.com/urfave/cli"
+)
+
+var MonitorCommand = cli.Command{
+ Name: "monitor",
+ Usage: "display build events",
+ Action: monitor,
+ Flags: []cli.Flag{
+ cli.BoolFlag{
+ Name: "completed",
+ Usage: "show completed builds",
+ },
+ cli.StringFlag{
+ Name: "ref",
+ Usage: "show events for a specific build",
+ },
+ },
+}
+
+func monitor(clicontext *cli.Context) error {
+ c, err := bccommon.ResolveClient(clicontext)
+ if err != nil {
+ return err
+ }
+ completed := clicontext.Bool("completed")
+
+ ctx := appcontext.Context()
+
+ cl, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
+ ActiveOnly: !completed,
+ Ref: clicontext.String("ref"),
+ })
+ if err != nil {
+ return err
+ }
+
+ for {
+ ev, err := cl.Recv()
+ if err != nil {
+ return err
+ }
+ fmt.Printf("event: %s ref:%s\n", ev.Type.String(), ev.Record.Ref)
+ if ev.Record.NumTotalSteps != 0 {
+ fmt.Printf(" cache: %d/%d\n", ev.Record.NumCachedSteps, ev.Record.NumTotalSteps)
+ }
+ if ev.Record.Logs != nil {
+ fmt.Printf(" logs: %s\n", ev.Record.Logs)
+ }
+ if ev.Record.Trace != nil {
+ fmt.Printf(" trace: %s\n", ev.Record.Trace)
+ }
+
+ if ev.Record.Result != nil {
+ if ev.Record.Result.Result != nil {
+ fmt.Printf(" descriptor: %s\n", ev.Record.Result.Result)
+ }
+ for _, att := range ev.Record.Result.Attestations {
+ fmt.Printf(" attestation: %s\n", att)
+ }
+ }
+ for k, res := range ev.Record.Results {
+ if res.Result != nil {
+ fmt.Printf(" [%s] descriptor: %s\n", k, res.Result)
+ }
+ for _, att := range res.Attestations {
+ fmt.Printf(" [%s] attestation: %s\n", k, att)
+ }
+ }
+ }
+}
diff --git a/cmd/buildctl/debug/workers.go b/cmd/buildctl/debug/workers.go
index feacf4beefbe..9a68d34d532b 100644
--- a/cmd/buildctl/debug/workers.go
+++ b/cmd/buildctl/debug/workers.go
@@ -1,15 +1,12 @@
package debug
import (
- "bytes"
"context"
- "encoding/json"
"fmt"
"os"
"sort"
"strings"
"text/tabwriter"
- "text/template"
"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/client"
@@ -54,7 +51,7 @@ func listWorkers(clicontext *cli.Context) error {
if clicontext.Bool("verbose") {
logrus.Debug("Ignoring --verbose")
}
- tmpl, err := parseTemplate(format)
+ tmpl, err := bccommon.ParseTemplate(format)
if err != nil {
return err
}
@@ -79,6 +76,7 @@ func printWorkersVerbose(tw *tabwriter.Writer, winfo []*client.WorkerInfo) {
for _, wi := range winfo {
fmt.Fprintf(tw, "ID:\t%s\n", wi.ID)
fmt.Fprintf(tw, "Platforms:\t%s\n", joinPlatforms(wi.Platforms))
+ fmt.Fprintf(tw, "BuildKit:\t%s %s %s\n", wi.BuildkitVersion.Package, wi.BuildkitVersion.Version, wi.BuildkitVersion.Revision)
fmt.Fprintf(tw, "Labels:\n")
for _, k := range sortedKeys(wi.Labels) {
v := wi.Labels[k]
@@ -136,25 +134,3 @@ func joinPlatforms(p []ocispecs.Platform) string {
}
return strings.Join(str, ",")
}
-
-func parseTemplate(format string) (*template.Template, error) {
- // aliases is from https://github.com/containerd/nerdctl/blob/v0.17.1/cmd/nerdctl/fmtutil.go#L116-L126 (Apache License 2.0)
- aliases := map[string]string{
- "json": "{{json .}}",
- }
- if alias, ok := aliases[format]; ok {
- format = alias
- }
- // funcs is from https://github.com/docker/cli/blob/v20.10.12/templates/templates.go#L12-L20 (Apache License 2.0)
- funcs := template.FuncMap{
- "json": func(v interface{}) string {
- buf := &bytes.Buffer{}
- enc := json.NewEncoder(buf)
- enc.SetEscapeHTML(false)
- enc.Encode(v)
- // Remove the trailing new line added by the encoder
- return strings.TrimSpace(buf.String())
- },
- }
- return template.New("").Funcs(funcs).Parse(format)
-}
diff --git a/cmd/buildctl/diskusage.go b/cmd/buildctl/diskusage.go
index 48f64cfa4cf4..8c00fcfe4e77 100644
--- a/cmd/buildctl/diskusage.go
+++ b/cmd/buildctl/diskusage.go
@@ -9,6 +9,7 @@ import (
"github.com/moby/buildkit/client"
bccommon "github.com/moby/buildkit/cmd/buildctl/common"
+ "github.com/sirupsen/logrus"
"github.com/tonistiigi/units"
"github.com/urfave/cli"
)
@@ -26,6 +27,10 @@ var diskUsageCommand = cli.Command{
Name: "verbose, v",
Usage: "Verbose output",
},
+ cli.StringFlag{
+ Name: "format",
+			Usage: "Format the output using the given Go template, e.g., '{{json .}}'",
+ },
},
}
@@ -40,6 +45,21 @@ func diskUsage(clicontext *cli.Context) error {
return err
}
+ if format := clicontext.String("format"); format != "" {
+ if clicontext.Bool("verbose") {
+ logrus.Debug("Ignoring --verbose")
+ }
+ tmpl, err := bccommon.ParseTemplate(format)
+ if err != nil {
+ return err
+ }
+ if err := tmpl.Execute(clicontext.App.Writer, du); err != nil {
+ return err
+ }
+ _, err = fmt.Fprintf(clicontext.App.Writer, "\n")
+ return err
+ }
+
tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
if clicontext.Bool("verbose") {
diff --git a/cmd/buildctl/main.go b/cmd/buildctl/main.go
index a4780517ae67..8203fc9d725a 100644
--- a/cmd/buildctl/main.go
+++ b/cmd/buildctl/main.go
@@ -7,6 +7,7 @@ import (
_ "github.com/moby/buildkit/client/connhelper/dockercontainer"
_ "github.com/moby/buildkit/client/connhelper/kubepod"
_ "github.com/moby/buildkit/client/connhelper/podmancontainer"
+ _ "github.com/moby/buildkit/client/connhelper/ssh"
bccommon "github.com/moby/buildkit/cmd/buildctl/common"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/apicaps"
diff --git a/cmd/buildctl/prune.go b/cmd/buildctl/prune.go
index fc3adeaf0e4a..31c917262514 100644
--- a/cmd/buildctl/prune.go
+++ b/cmd/buildctl/prune.go
@@ -7,6 +7,7 @@ import (
"github.com/moby/buildkit/client"
bccommon "github.com/moby/buildkit/cmd/buildctl/common"
+ "github.com/sirupsen/logrus"
"github.com/tonistiigi/units"
"github.com/urfave/cli"
)
@@ -36,6 +37,10 @@ var pruneCommand = cli.Command{
Name: "verbose, v",
Usage: "Verbose output",
},
+ cli.StringFlag{
+ Name: "format",
+			Usage: "Format the output using the given Go template, e.g., '{{json .}}'",
+ },
},
}
@@ -47,27 +52,7 @@ func prune(clicontext *cli.Context) error {
ch := make(chan client.UsageInfo)
printed := make(chan struct{})
-
- tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
- first := true
- total := int64(0)
-
- go func() {
- defer close(printed)
- for du := range ch {
- total += du.Size
- if clicontext.Bool("verbose") {
- printVerbose(tw, []*client.UsageInfo{&du})
- } else {
- if first {
- printTableHeader(tw)
- first = false
- }
- printTableRow(tw, &du)
- tw.Flush()
- }
- }
- }()
+ var summarizer func()
opts := []client.PruneOption{
client.WithFilter(clicontext.StringSlice("filter")),
@@ -78,16 +63,61 @@ func prune(clicontext *cli.Context) error {
opts = append(opts, client.PruneAll)
}
+ if format := clicontext.String("format"); format != "" {
+ if clicontext.Bool("verbose") {
+ logrus.Debug("Ignoring --verbose")
+ }
+ tmpl, err := bccommon.ParseTemplate(format)
+ if err != nil {
+ return err
+ }
+ go func() {
+ defer close(printed)
+ for du := range ch {
+ // Unlike `buildctl du`, the template is applied to a UsageInfo, not to a slice of UsageInfo
+ if err := tmpl.Execute(clicontext.App.Writer, du); err != nil {
+ panic(err)
+ }
+ if _, err = fmt.Fprintf(clicontext.App.Writer, "\n"); err != nil {
+ panic(err)
+ }
+ }
+ }()
+ } else {
+ tw := tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
+ first := true
+ total := int64(0)
+ go func() {
+ defer close(printed)
+ for du := range ch {
+ total += du.Size
+ if clicontext.Bool("verbose") {
+ printVerbose(tw, []*client.UsageInfo{&du})
+ } else {
+ if first {
+ printTableHeader(tw)
+ first = false
+ }
+ printTableRow(tw, &du)
+ tw.Flush()
+ }
+ }
+ }()
+ summarizer = func() {
+ tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
+ fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
+ tw.Flush()
+ }
+ }
+
err = c.Prune(bccommon.CommandContext(clicontext), ch, opts...)
close(ch)
<-printed
if err != nil {
return err
}
-
- tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
- fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
- tw.Flush()
-
+ if summarizer != nil {
+ summarizer()
+ }
return nil
}
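
As the inline comment notes, `prune --format` applies the template to each `UsageInfo` as it arrives, while `du --format` receives the whole slice. A toy illustration of the resulting output shapes (the struct and field names below are made up for the example):

```go
package main

import (
	"encoding/json"
	"os"
)

// usageInfo is a stand-in for the client's UsageInfo; only the output shape
// matters here.
type usageInfo struct {
	ID   string `json:"id"`
	Size int64  `json:"size"`
}

func main() {
	records := []usageInfo{{ID: "abc", Size: 1024}, {ID: "def", Size: 2048}}
	enc := json.NewEncoder(os.Stdout)

	// `du --format '{{json .}}'` renders the whole slice at once:
	// [{"id":"abc","size":1024},{"id":"def","size":2048}]
	enc.Encode(records)

	// `prune --format '{{json .}}'` renders one record per line as each
	// pruned entry arrives on the channel:
	// {"id":"abc","size":1024}
	// {"id":"def","size":2048}
	for _, r := range records {
		enc.Encode(r)
	}
}
```
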
diff --git a/cmd/buildkitd/config/config.go b/cmd/buildkitd/config/config.go
index 7ee7b577d582..1734d5e1567e 100644
--- a/cmd/buildkitd/config/config.go
+++ b/cmd/buildkitd/config/config.go
@@ -24,6 +24,8 @@ type Config struct {
Registries map[string]resolverconfig.RegistryConfig `toml:"registry"`
DNS *DNSConfig `toml:"dns"`
+
+ History *HistoryConfig `toml:"history"`
}
type GRPCConfig struct {
@@ -53,6 +55,7 @@ type NetworkConfig struct {
Mode string `toml:"networkMode"`
CNIConfigPath string `toml:"cniConfigPath"`
CNIBinaryPath string `toml:"cniBinaryPath"`
+ CNIPoolSize int `toml:"cniPoolSize"`
}
type OCIConfig struct {
@@ -81,6 +84,9 @@ type OCIConfig struct {
// The profile should already be loaded (by a higher level system) before creating a worker.
ApparmorProfile string `toml:"apparmor-profile"`
+ // SELinux enables applying SELinux labels.
+ SELinux bool `toml:"selinux"`
+
// MaxParallelism is the maximum number of parallel build steps that can be run at the same time.
MaxParallelism int `toml:"max-parallelism"`
}
@@ -99,6 +105,9 @@ type ContainerdConfig struct {
// The profile should already be loaded (by a higher level system) before creating a worker.
ApparmorProfile string `toml:"apparmor-profile"`
+ // SELinux enables applying SELinux labels.
+ SELinux bool `toml:"selinux"`
+
MaxParallelism int `toml:"max-parallelism"`
Rootless bool `toml:"rootless"`
@@ -116,3 +125,8 @@ type DNSConfig struct {
Options []string `toml:"options"`
SearchDomains []string `toml:"searchDomains"`
}
+
+type HistoryConfig struct {
+ MaxAge int64 `toml:"maxAge"`
+ MaxEntries int64 `toml:"maxEntries"`
+}
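
The new `HistoryConfig` section exposes build-history retention knobs in `buildkitd.toml`. A hedged sketch of decoding such a section, using `github.com/BurntSushi/toml` purely for illustration (buildkitd's own config loader and the units of `maxAge` are not shown in this hunk):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// historyConfig mirrors the struct tags introduced above; the values and the
// decoder choice are illustrative only.
type historyConfig struct {
	MaxAge     int64 `toml:"maxAge"`
	MaxEntries int64 `toml:"maxEntries"`
}

type config struct {
	History *historyConfig `toml:"history"`
}

func main() {
	// A hypothetical buildkitd.toml fragment; the concrete values are made up.
	data := `
[history]
  maxAge = 172800
  maxEntries = 50
`
	var cfg config
	if _, err := toml.Decode(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("maxAge=%d maxEntries=%d\n", cfg.History.MaxAge, cfg.History.MaxEntries)
}
```
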
diff --git a/cmd/buildkitd/constants_unix.go b/cmd/buildkitd/constants_unix.go
new file mode 100644
index 000000000000..8463a07745e0
--- /dev/null
+++ b/cmd/buildkitd/constants_unix.go
@@ -0,0 +1,8 @@
+//go:build !windows
+// +build !windows
+
+package main
+
+const (
+ defaultContainerdAddress = "/run/containerd/containerd.sock"
+)
diff --git a/cmd/buildkitd/constants_windows.go b/cmd/buildkitd/constants_windows.go
new file mode 100644
index 000000000000..eb411d3f807c
--- /dev/null
+++ b/cmd/buildkitd/constants_windows.go
@@ -0,0 +1,5 @@
+package main
+
+const (
+ defaultContainerdAddress = "//./pipe/containerd-containerd"
+)
diff --git a/cmd/buildkitd/main.go b/cmd/buildkitd/main.go
index 126ba0dbe2c8..ca411066b30f 100644
--- a/cmd/buildkitd/main.go
+++ b/cmd/buildkitd/main.go
@@ -5,7 +5,6 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
- "io/ioutil"
"net"
"os"
"os/user"
@@ -25,10 +24,12 @@ import (
"github.com/gofrs/flock"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/moby/buildkit/cache/remotecache"
+ "github.com/moby/buildkit/cache/remotecache/azblob"
"github.com/moby/buildkit/cache/remotecache/gha"
inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
localremotecache "github.com/moby/buildkit/cache/remotecache/local"
registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
+ s3remotecache "github.com/moby/buildkit/cache/remotecache/s3"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/moby/buildkit/control"
@@ -58,6 +59,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
+ "go.etcd.io/bbolt"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/otel/propagation"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
@@ -75,6 +77,9 @@ func init() {
if reexec.Init() {
os.Exit(0)
}
+
+ // enable in memory recording for buildkitd traces
+ detect.Recorder = detect.NewTraceRecorder()
}
var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})
@@ -264,6 +269,7 @@ func main() {
if err != nil {
return err
}
+ defer controller.Close()
controller.Register(server)
@@ -277,7 +283,7 @@ func main() {
case "network.host":
cfg.Entitlements = append(cfg.Entitlements, e)
default:
- return fmt.Errorf("invalid entitlement : %v", e)
+ return errors.Errorf("invalid entitlement : %s", e)
}
}
}
@@ -383,10 +389,10 @@ func setDefaultNetworkConfig(nc config.NetworkConfig) config.NetworkConfig {
nc.Mode = "auto"
}
if nc.CNIConfigPath == "" {
- nc.CNIConfigPath = "/etc/buildkit/cni.json"
+ nc.CNIConfigPath = appdefaults.DefaultCNIConfigPath
}
if nc.CNIBinaryPath == "" {
- nc.CNIBinaryPath = "/opt/cni/bin"
+ nc.CNIBinaryPath = appdefaults.DefaultCNIBinDir
}
return nc
}
@@ -565,7 +571,10 @@ func unaryInterceptor(globalCtx context.Context, tp trace.TracerProvider) grpc.U
resp, err = withTrace(ctx, req, info, handler)
if err != nil {
- logrus.Errorf("%s returned error: %+v", info.FullMethod, stack.Formatter(err))
+ logrus.Errorf("%s returned error: %v", info.FullMethod, err)
+ if logrus.GetLevel() >= logrus.DebugLevel {
+ fmt.Fprintf(os.Stderr, "%+v", stack.Formatter(grpcerrors.FromGRPC(err)))
+ }
}
return
}
@@ -594,7 +603,7 @@ func serverCredentials(cfg config.TLSConfig) (*tls.Config, error) {
}
if caFile != "" {
certPool := x509.NewCertPool()
- ca, err := ioutil.ReadFile(caFile)
+ ca, err := os.ReadFile(caFile)
if err != nil {
return nil, errors.Wrap(err, "could not read ca certificate")
}
@@ -623,7 +632,8 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err
if tc != nil {
traceSocket = filepath.Join(cfg.Root, "otel-grpc.sock")
if err := runTraceController(traceSocket, tc); err != nil {
- return nil, err
+			logrus.Warnf("failed to set up otel-grpc controller: %v", err)
+ traceSocket = ""
}
}
@@ -644,6 +654,11 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err
return nil, err
}
+ historyDB, err := bbolt.Open(filepath.Join(cfg.Root, "history.db"), 0600, nil)
+ if err != nil {
+ return nil, err
+ }
+
resolverFn := resolverFunc(cfg)
w, err := wc.GetDefault()
@@ -656,13 +671,16 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err
"local": localremotecache.ResolveCacheExporterFunc(sessionManager),
"inline": inlineremotecache.ResolveCacheExporterFunc(),
"gha": gha.ResolveCacheExporterFunc(),
+ "s3": s3remotecache.ResolveCacheExporterFunc(),
+ "azblob": azblob.ResolveCacheExporterFunc(),
}
remoteCacheImporterFuncs := map[string]remotecache.ResolveCacheImporterFunc{
"registry": registryremotecache.ResolveCacheImporterFunc(sessionManager, w.ContentStore(), resolverFn),
"local": localremotecache.ResolveCacheImporterFunc(sessionManager),
"gha": gha.ResolveCacheImporterFunc(),
+ "s3": s3remotecache.ResolveCacheImporterFunc(),
+ "azblob": azblob.ResolveCacheImporterFunc(),
}
-
return control.NewController(control.Opt{
SessionManager: sessionManager,
WorkerController: wc,
@@ -672,6 +690,10 @@ func newController(c *cli.Context, cfg *config.Config) (*control.Controller, err
CacheKeyStorage: cacheStorage,
Entitlements: cfg.Entitlements,
TraceCollector: tc,
+ HistoryDB: historyDB,
+ LeaseManager: w.LeaseManager(),
+ ContentStore: w.ContentStore(),
+ HistoryConfig: cfg.History,
})
}
@@ -760,6 +782,14 @@ func getGCPolicy(cfg config.GCConfig, root string) []client.PruneInfo {
return out
}
+func getBuildkitVersion() client.BuildkitVersion {
+ return client.BuildkitVersion{
+ Package: version.Package,
+ Version: version.Version,
+ Revision: version.Revision,
+ }
+}
+
func getDNSConfig(cfg *config.DNSConfig) *oci.DNSConfig {
var dns *oci.DNSConfig
if cfg != nil {
@@ -774,7 +804,7 @@ func getDNSConfig(cfg *config.DNSConfig) *oci.DNSConfig {
// parseBoolOrAuto returns (nil, nil) if s is "auto"
func parseBoolOrAuto(s string) (*bool, error) {
- if s == "" || strings.ToLower(s) == "auto" {
+ if s == "" || strings.EqualFold(s, "auto") {
return nil, nil
}
b, err := strconv.ParseBool(s)
diff --git a/cmd/buildkitd/main_containerd_worker.go b/cmd/buildkitd/main_containerd_worker.go
index 00079676b1bb..7992b1bf87ac 100644
--- a/cmd/buildkitd/main_containerd_worker.go
+++ b/cmd/buildkitd/main_containerd_worker.go
@@ -25,7 +25,6 @@ import (
)
const (
- defaultContainerdAddress = "/run/containerd/containerd.sock"
defaultContainerdNamespace = "buildkit"
)
@@ -90,6 +89,11 @@ func init() {
Usage: "path of cni binary files",
Value: defaultConf.Workers.Containerd.NetworkConfig.CNIBinaryPath,
},
+ cli.IntFlag{
+ Name: "containerd-cni-pool-size",
+ Usage: "size of cni network namespace pool",
+ Value: defaultConf.Workers.Containerd.NetworkConfig.CNIPoolSize,
+ },
cli.StringFlag{
Name: "containerd-worker-snapshotter",
Usage: "snapshotter name to use",
@@ -99,6 +103,10 @@ func init() {
Name: "containerd-worker-apparmor-profile",
Usage: "set the name of the apparmor profile applied to containers",
},
+ cli.BoolFlag{
+ Name: "containerd-worker-selinux",
+ Usage: "apply SELinux labels",
+ },
}
n := "containerd-worker-rootless"
u := "enable rootless mode"
@@ -208,6 +216,9 @@ func applyContainerdFlags(c *cli.Context, cfg *config.Config) error {
if c.GlobalIsSet("containerd-cni-config-path") {
cfg.Workers.Containerd.NetworkConfig.CNIConfigPath = c.GlobalString("containerd-cni-config-path")
}
+ if c.GlobalIsSet("containerd-cni-pool-size") {
+ cfg.Workers.Containerd.NetworkConfig.CNIPoolSize = c.GlobalInt("containerd-cni-pool-size")
+ }
if c.GlobalIsSet("containerd-cni-binary-dir") {
cfg.Workers.Containerd.NetworkConfig.CNIBinaryPath = c.GlobalString("containerd-cni-binary-dir")
}
@@ -217,6 +228,9 @@ func applyContainerdFlags(c *cli.Context, cfg *config.Config) error {
if c.GlobalIsSet("containerd-worker-apparmor-profile") {
cfg.Workers.Containerd.ApparmorProfile = c.GlobalString("containerd-worker-apparmor-profile")
}
+ if c.GlobalIsSet("containerd-worker-selinux") {
+ cfg.Workers.Containerd.SELinux = c.GlobalBool("containerd-worker-selinux")
+ }
return nil
}
@@ -228,7 +242,7 @@ func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([
cfg := common.config.Workers.Containerd
- if (cfg.Enabled == nil && !validContainerdSocket(cfg.Address)) || (cfg.Enabled != nil && !*cfg.Enabled) {
+ if (cfg.Enabled == nil && !validContainerdSocket(cfg)) || (cfg.Enabled != nil && !*cfg.Enabled) {
return nil, nil
}
@@ -247,6 +261,7 @@ func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([
Root: common.config.Root,
ConfigPath: common.config.Workers.Containerd.CNIConfigPath,
BinaryDir: common.config.Workers.Containerd.CNIBinaryPath,
+ PoolSize: common.config.Workers.Containerd.CNIPoolSize,
},
}
@@ -259,11 +274,12 @@ func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([
if cfg.Snapshotter != "" {
snapshotter = cfg.Snapshotter
}
- opt, err := containerd.NewWorkerOpt(common.config.Root, cfg.Address, snapshotter, cfg.Namespace, cfg.Rootless, cfg.Labels, dns, nc, common.config.Workers.Containerd.ApparmorProfile, parallelismSem, common.traceSocket, ctd.WithTimeout(60*time.Second))
+ opt, err := containerd.NewWorkerOpt(common.config.Root, cfg.Address, snapshotter, cfg.Namespace, cfg.Rootless, cfg.Labels, dns, nc, common.config.Workers.Containerd.ApparmorProfile, common.config.Workers.Containerd.SELinux, parallelismSem, common.traceSocket, ctd.WithTimeout(60*time.Second))
if err != nil {
return nil, err
}
opt.GCPolicy = getGCPolicy(cfg.GCConfig, common.config.Root)
+ opt.BuildkitVersion = getBuildkitVersion()
opt.RegistryHosts = resolverFunc(common.config)
if platformsStr := cfg.Platforms; len(platformsStr) != 0 {
@@ -280,7 +296,8 @@ func containerdWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([
return []worker.Worker{w}, nil
}
-func validContainerdSocket(socket string) bool {
+func validContainerdSocket(cfg config.ContainerdConfig) bool {
+ socket := cfg.Address
if strings.HasPrefix(socket, "tcp://") {
// FIXME(AkihiroSuda): prohibit tcp?
return true
@@ -291,6 +308,14 @@ func validContainerdSocket(socket string) bool {
logrus.Warnf("skipping containerd worker, as %q does not exist", socketPath)
return false
}
- // TODO: actually dial and call introspection API
+ c, err := ctd.New(socketPath, ctd.WithDefaultNamespace(cfg.Namespace))
+ if err != nil {
+ logrus.Warnf("skipping containerd worker, as failed to connect client to %q: %v", socketPath, err)
+ return false
+ }
+ if _, err := c.Server(context.Background()); err != nil {
+ logrus.Warnf("skipping containerd worker, as failed to call introspection API on %q: %v", socketPath, err)
+ return false
+ }
return true
}
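
`validContainerdSocket` now dials containerd and calls the introspection API instead of only checking that the socket path exists. A standalone sketch of that probe (the address and namespace below are illustrative, not taken from the patch):

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
)

// probeContainerd mirrors the stricter check: dial containerd and call the
// introspection API before deciding to register the worker, rather than only
// testing that the socket path exists.
func probeContainerd(ctx context.Context, address, namespace string) error {
	client, err := containerd.New(address, containerd.WithDefaultNamespace(namespace))
	if err != nil {
		return fmt.Errorf("failed to connect to %q: %w", address, err)
	}
	defer client.Close()
	if _, err := client.Server(ctx); err != nil {
		return fmt.Errorf("introspection API failed on %q: %w", address, err)
	}
	return nil
}

func main() {
	if err := probeContainerd(context.Background(), "/run/containerd/containerd.sock", "buildkit"); err != nil {
		fmt.Println("skipping containerd worker:", err)
	}
}
```
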
diff --git a/cmd/buildkitd/main_oci_worker.go b/cmd/buildkitd/main_oci_worker.go
index a2cdaa8113d7..64cedacea156 100644
--- a/cmd/buildkitd/main_oci_worker.go
+++ b/cmd/buildkitd/main_oci_worker.go
@@ -27,6 +27,7 @@ import (
fuseoverlayfs "github.com/containerd/fuse-overlayfs-snapshotter"
sgzfs "github.com/containerd/stargz-snapshotter/fs"
sgzconf "github.com/containerd/stargz-snapshotter/fs/config"
+ sgzlayer "github.com/containerd/stargz-snapshotter/fs/layer"
sgzsource "github.com/containerd/stargz-snapshotter/fs/source"
remotesn "github.com/containerd/stargz-snapshotter/snapshot"
"github.com/moby/buildkit/cmd/buildkitd/config"
@@ -100,6 +101,11 @@ func init() {
Usage: "path of cni binary files",
Value: defaultConf.Workers.OCI.NetworkConfig.CNIBinaryPath,
},
+ cli.IntFlag{
+ Name: "oci-cni-pool-size",
+ Usage: "size of cni network namespace pool",
+ Value: defaultConf.Workers.OCI.NetworkConfig.CNIPoolSize,
+ },
cli.StringFlag{
Name: "oci-worker-binary",
Usage: "name of specified oci worker binary",
@@ -109,6 +115,10 @@ func init() {
Name: "oci-worker-apparmor-profile",
Usage: "set the name of the apparmor profile applied to containers",
},
+ cli.BoolFlag{
+ Name: "oci-worker-selinux",
+ Usage: "apply SELinux labels",
+ },
}
n := "oci-worker-rootless"
u := "enable rootless mode"
@@ -222,6 +232,9 @@ func applyOCIFlags(c *cli.Context, cfg *config.Config) error {
if c.GlobalIsSet("oci-cni-binary-dir") {
cfg.Workers.OCI.NetworkConfig.CNIBinaryPath = c.GlobalString("oci-cni-binary-dir")
}
+ if c.GlobalIsSet("oci-cni-pool-size") {
+ cfg.Workers.OCI.NetworkConfig.CNIPoolSize = c.GlobalInt("oci-cni-pool-size")
+ }
if c.GlobalIsSet("oci-worker-binary") {
cfg.Workers.OCI.Binary = c.GlobalString("oci-worker-binary")
}
@@ -231,6 +244,10 @@ func applyOCIFlags(c *cli.Context, cfg *config.Config) error {
if c.GlobalIsSet("oci-worker-apparmor-profile") {
cfg.Workers.OCI.ApparmorProfile = c.GlobalString("oci-worker-apparmor-profile")
}
+ if c.GlobalIsSet("oci-worker-selinux") {
+ cfg.Workers.OCI.SELinux = c.GlobalBool("oci-worker-selinux")
+ }
+
return nil
}
@@ -281,6 +298,7 @@ func ociWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([]worker
Root: common.config.Root,
ConfigPath: common.config.Workers.OCI.CNIConfigPath,
BinaryDir: common.config.Workers.OCI.CNIBinaryPath,
+ PoolSize: common.config.Workers.OCI.CNIPoolSize,
},
}
@@ -289,11 +307,12 @@ func ociWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([]worker
parallelismSem = semaphore.NewWeighted(int64(cfg.MaxParallelism))
}
- opt, err := runc.NewWorkerOpt(common.config.Root, snFactory, cfg.Rootless, processMode, cfg.Labels, idmapping, nc, dns, cfg.Binary, cfg.ApparmorProfile, parallelismSem, common.traceSocket, cfg.DefaultCgroupParent)
+ opt, err := runc.NewWorkerOpt(common.config.Root, snFactory, cfg.Rootless, processMode, cfg.Labels, idmapping, nc, dns, cfg.Binary, cfg.ApparmorProfile, cfg.SELinux, parallelismSem, common.traceSocket, cfg.DefaultCgroupParent)
if err != nil {
return nil, err
}
opt.GCPolicy = getGCPolicy(cfg.GCConfig, common.config.Root)
+ opt.BuildkitVersion = getBuildkitVersion()
opt.RegistryHosts = hosts
if platformsStr := cfg.Platforms; len(platformsStr) != 0 {
@@ -391,11 +410,20 @@ func snapshotterFactory(commonRoot string, cfg config.OCIConfig, sm *session.Man
}
}
snFactory.New = func(root string) (ctdsnapshot.Snapshotter, error) {
+ userxattr, err := overlayutils.NeedsUserXAttr(root)
+ if err != nil {
+ logrus.WithError(err).Warnf("cannot detect whether \"userxattr\" option needs to be used, assuming to be %v", userxattr)
+ }
+ opq := sgzlayer.OverlayOpaqueTrusted
+ if userxattr {
+ opq = sgzlayer.OverlayOpaqueUser
+ }
fs, err := sgzfs.NewFilesystem(filepath.Join(root, "stargz"),
sgzCfg,
// Source info based on the buildkit's registry config and session
sgzfs.WithGetSources(sourceWithSession(hosts, sm)),
sgzfs.WithMetricsLogLevel(logrus.DebugLevel),
+ sgzfs.WithOverlayOpaqueType(opq),
)
if err != nil {
return nil, err
diff --git a/cmd/buildkitd/util_linux.go b/cmd/buildkitd/util_linux.go
index a089f1f95af2..cfbc0a5c99c3 100644
--- a/cmd/buildkitd/util_linux.go
+++ b/cmd/buildkitd/util_linux.go
@@ -22,9 +22,9 @@ func parseIdentityMapping(str string) (*idtools.IdentityMapping, error) {
logrus.Debugf("user namespaces: ID ranges will be mapped to subuid ranges of: %s", username)
- mappings, err := idtools.NewIdentityMapping(username)
+ mappings, err := idtools.LoadIdentityMapping(username)
if err != nil {
return nil, errors.Wrap(err, "failed to create ID mappings")
}
- return mappings, nil
+ return &mappings, nil
}
diff --git a/control/control.go b/control/control.go
index 0d3e7976e5b7..2bd06db2576b 100644
--- a/control/control.go
+++ b/control/control.go
@@ -2,34 +2,49 @@ package control
import (
"context"
+ "fmt"
+ "strconv"
"sync"
"sync/atomic"
"time"
- "github.com/moby/buildkit/util/bklog"
-
+ contentapi "github.com/containerd/containerd/api/services/content/v1"
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/leases"
+ "github.com/containerd/containerd/services/content/contentserver"
+ "github.com/docker/distribution/reference"
+ "github.com/mitchellh/hashstructure/v2"
controlapi "github.com/moby/buildkit/api/services/control"
apitypes "github.com/moby/buildkit/api/types"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/cmd/buildkitd/config"
controlgateway "github.com/moby/buildkit/control/gateway"
"github.com/moby/buildkit/exporter"
+ "github.com/moby/buildkit/exporter/util/epoch"
"github.com/moby/buildkit/frontend"
+ "github.com/moby/buildkit/frontend/attestations"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/grpchijack"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/solver/llbsolver"
+ "github.com/moby/buildkit/solver/llbsolver/proc"
"github.com/moby/buildkit/solver/pb"
+ "github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/util/throttle"
"github.com/moby/buildkit/util/tracing/transform"
+ "github.com/moby/buildkit/version"
"github.com/moby/buildkit/worker"
+ digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
+ "go.etcd.io/bbolt"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
tracev1 "go.opentelemetry.io/proto/otlp/collector/trace/v1"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
@@ -42,6 +57,10 @@ type Opt struct {
ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
Entitlements []string
TraceCollector sdktrace.SpanExporter
+ HistoryDB *bbolt.DB
+ LeaseManager leases.Manager
+ ContentStore content.Store
+ HistoryConfig *config.HistoryConfig
}
type Controller struct { // TODO: ControlService
@@ -49,6 +68,7 @@ type Controller struct { // TODO: ControlService
buildCount int64
opt Opt
solver *llbsolver.Solver
+ history *llbsolver.HistoryQueue
cache solver.CacheManager
gatewayForwarder *controlgateway.GatewayForwarder
throttledGC func()
@@ -61,14 +81,31 @@ func NewController(opt Opt) (*Controller, error) {
gatewayForwarder := controlgateway.NewGatewayForwarder()
- solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager, opt.Entitlements)
+ hq := llbsolver.NewHistoryQueue(llbsolver.HistoryQueueOpt{
+ DB: opt.HistoryDB,
+ LeaseManager: opt.LeaseManager,
+ ContentStore: opt.ContentStore,
+ CleanConfig: opt.HistoryConfig,
+ })
+
+ s, err := llbsolver.New(llbsolver.Opt{
+ WorkerController: opt.WorkerController,
+ Frontends: opt.Frontends,
+ CacheManager: cache,
+ CacheResolvers: opt.ResolveCacheImporterFuncs,
+ GatewayForwarder: gatewayForwarder,
+ SessionManager: opt.SessionManager,
+ Entitlements: opt.Entitlements,
+ HistoryQueue: hq,
+ })
if err != nil {
return nil, errors.Wrap(err, "failed to create solver")
}
c := &Controller{
opt: opt,
- solver: solver,
+ solver: s,
+ history: hq,
cache: cache,
gatewayForwarder: gatewayForwarder,
}
@@ -81,11 +118,17 @@ func NewController(opt Opt) (*Controller, error) {
return c, nil
}
-func (c *Controller) Register(server *grpc.Server) error {
+func (c *Controller) Close() error {
+ return c.opt.WorkerController.Close()
+}
+
+func (c *Controller) Register(server *grpc.Server) {
controlapi.RegisterControlServer(server, c)
c.gatewayForwarder.Register(server)
tracev1.RegisterTraceServiceServer(server, c)
- return nil
+
+ store := &roContentStore{c.opt.ContentStore}
+ contentapi.RegisterContentServer(server, contentserver.New(store))
}
func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) {
@@ -205,6 +248,34 @@ func (c *Controller) Export(ctx context.Context, req *tracev1.ExportTraceService
return &tracev1.ExportTraceServiceResponse{}, nil
}
+func (c *Controller) ListenBuildHistory(req *controlapi.BuildHistoryRequest, srv controlapi.Control_ListenBuildHistoryServer) error {
+ if err := sendTimestampHeader(srv); err != nil {
+ return err
+ }
+ return c.history.Listen(srv.Context(), req, func(h *controlapi.BuildHistoryEvent) error {
+ if err := srv.Send(h); err != nil {
+ return err
+ }
+ return nil
+ })
+}
+
+func (c *Controller) UpdateBuildHistory(ctx context.Context, req *controlapi.UpdateBuildHistoryRequest) (*controlapi.UpdateBuildHistoryResponse, error) {
+ if !req.Delete {
+ err := c.history.UpdateRef(ctx, req.Ref, func(r *controlapi.BuildHistoryRecord) error {
+ if req.Pinned == r.Pinned {
+ return nil
+ }
+ r.Pinned = req.Pinned
+ return nil
+ })
+ return &controlapi.UpdateBuildHistoryResponse{}, err
+ }
+
+ err := c.history.Delete(ctx, req.Ref)
+ return &controlapi.UpdateBuildHistoryResponse{}, err
+}
+
func translateLegacySolveRequest(req *controlapi.SolveRequest) error {
// translates ExportRef and ExportAttrs to new Exports (v0.4.0)
if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" {
@@ -255,6 +326,26 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
if err != nil {
return nil, err
}
+
+ // if SOURCE_DATE_EPOCH is set, enable it for the exporter
+ if v, ok := epoch.ParseBuildArgs(req.FrontendAttrs); ok {
+ if _, ok := req.ExporterAttrs[epoch.KeySourceDateEpoch]; !ok {
+ if req.ExporterAttrs == nil {
+ req.ExporterAttrs = make(map[string]string)
+ }
+ req.ExporterAttrs[epoch.KeySourceDateEpoch] = v
+ }
+ }
+
+ if v, ok := req.FrontendAttrs["build-arg:BUILDKIT_BUILDINFO"]; ok && v != "" {
+ if _, ok := req.ExporterAttrs["buildinfo"]; !ok {
+ if req.ExporterAttrs == nil {
+ req.ExporterAttrs = make(map[string]string)
+ }
+ req.ExporterAttrs["buildinfo"] = v
+ }
+ }
+
if req.Exporter != "" {
exp, err := w.Exporter(req.Exporter, c.opt.SessionManager)
if err != nil {
@@ -266,32 +357,42 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
}
}
- var (
- cacheExporter remotecache.Exporter
- cacheExportMode solver.CacheExportMode
- cacheImports []frontend.CacheOptionsEntry
- )
- if len(req.Cache.Exports) > 1 {
- // TODO(AkihiroSuda): this should be fairly easy
- return nil, errors.New("specifying multiple cache exports is not supported currently")
+ if c, err := findDuplicateCacheOptions(req.Cache.Exports); err != nil {
+ return nil, err
+ } else if c != nil {
+ types := []string{}
+ for _, c := range c {
+ types = append(types, c.Type)
+ }
+ return nil, errors.Errorf("duplicate cache exports %s", types)
}
-
- if len(req.Cache.Exports) == 1 {
- e := req.Cache.Exports[0]
+ var cacheExporters []llbsolver.RemoteCacheExporter
+ for _, e := range req.Cache.Exports {
cacheExporterFunc, ok := c.opt.ResolveCacheExporterFuncs[e.Type]
if !ok {
return nil, errors.Errorf("unknown cache exporter: %q", e.Type)
}
- cacheExporter, err = cacheExporterFunc(ctx, session.NewGroup(req.Session), e.Attrs)
+ var exp llbsolver.RemoteCacheExporter
+ exp.Exporter, err = cacheExporterFunc(ctx, session.NewGroup(req.Session), e.Attrs)
if err != nil {
- return nil, err
+ return nil, errors.Wrapf(err, "failed to configure %v cache exporter", e.Type)
}
if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported {
bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"])
} else {
- cacheExportMode = exportMode
+ exp.CacheExportMode = exportMode
+ }
+ if ignoreErrorStr, ok := e.Attrs["ignore-error"]; ok {
+ if ignoreError, supported := parseCacheExportIgnoreError(ignoreErrorStr); !supported {
+ bklog.G(ctx).Debugf("skipping invalid cache export ignore-error: %s", e.Attrs["ignore-error"])
+ } else {
+ exp.IgnoreError = ignoreError
+ }
}
+ cacheExporters = append(cacheExporters, exp)
}
+
+ var cacheImports []frontend.CacheOptionsEntry
for _, im := range req.Cache.Imports {
cacheImports = append(cacheImports, frontend.CacheOptionsEntry{
Type: im.Type,
@@ -299,6 +400,36 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
})
}
+ attests, err := attestations.Parse(req.FrontendAttrs)
+ if err != nil {
+ return nil, err
+ }
+
+ var procs []llbsolver.Processor
+
+ if attrs, ok := attests["sbom"]; ok {
+ src := attrs["generator"]
+ if src == "" {
+ return nil, errors.Errorf("sbom generator cannot be empty")
+ }
+ ref, err := reference.ParseNormalizedNamed(src)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse sbom generator %s", src)
+ }
+
+ useCache := true
+ if v, ok := req.FrontendAttrs["no-cache"]; ok && v == "" {
+ // disable cache if cache is disabled for all stages
+ useCache = false
+ }
+ ref = reference.TagNameOnly(ref)
+ procs = append(procs, proc.SBOMProcessor(ref.String(), useCache))
+ }
+
+ if attrs, ok := attests["provenance"]; ok {
+ procs = append(procs, proc.ProvenanceProcessor(attrs))
+ }
+
resp, err := c.solver.Solve(ctx, req.Ref, req.Session, frontend.SolveRequest{
Frontend: req.Frontend,
Definition: req.Definition,
@@ -306,10 +437,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
FrontendInputs: req.FrontendInputs,
CacheImports: cacheImports,
}, llbsolver.ExporterRequest{
- Exporter: expi,
- CacheExporter: cacheExporter,
- CacheExportMode: cacheExportMode,
- }, req.Entitlements)
+ Exporter: expi,
+ CacheExporters: cacheExporters,
+ Type: req.Exporter,
+ Attrs: req.ExporterAttrs,
+ }, req.Entitlements, procs, req.Internal, req.SourcePolicy)
if err != nil {
return nil, err
}
@@ -319,6 +451,9 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*
}
func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error {
+ if err := sendTimestampHeader(stream); err != nil {
+ return err
+ }
ch := make(chan *client.SolveStatus, 8)
eg, ctx := errgroup.WithContext(stream.Context())
@@ -332,68 +467,10 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con
if !ok {
return nil
}
- logSize := 0
- for {
- retry := false
- sr := controlapi.StatusResponse{}
- for _, v := range ss.Vertexes {
- sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{
- Digest: v.Digest,
- Inputs: v.Inputs,
- Name: v.Name,
- Started: v.Started,
- Completed: v.Completed,
- Error: v.Error,
- Cached: v.Cached,
- ProgressGroup: v.ProgressGroup,
- })
- }
- for _, v := range ss.Statuses {
- sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{
- ID: v.ID,
- Vertex: v.Vertex,
- Name: v.Name,
- Current: v.Current,
- Total: v.Total,
- Timestamp: v.Timestamp,
- Started: v.Started,
- Completed: v.Completed,
- })
- }
- for i, v := range ss.Logs {
- sr.Logs = append(sr.Logs, &controlapi.VertexLog{
- Vertex: v.Vertex,
- Stream: int64(v.Stream),
- Msg: v.Data,
- Timestamp: v.Timestamp,
- })
- logSize += len(v.Data) + emptyLogVertexSize
- // avoid logs growing big and split apart if they do
- if logSize > 1024*1024 {
- ss.Vertexes = nil
- ss.Statuses = nil
- ss.Logs = ss.Logs[i+1:]
- retry = true
- break
- }
- }
- for _, v := range ss.Warnings {
- sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{
- Vertex: v.Vertex,
- Level: int64(v.Level),
- Short: v.Short,
- Detail: v.Detail,
- Info: v.SourceInfo,
- Ranges: v.Range,
- Url: v.URL,
- })
- }
- if err := stream.SendMsg(&sr); err != nil {
+ for _, sr := range ss.Marshal() {
+ if err := stream.SendMsg(sr); err != nil {
return err
}
- if !retry {
- break
- }
}
}
})
@@ -426,15 +503,26 @@ func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersR
}
for _, w := range workers {
resp.Record = append(resp.Record, &apitypes.WorkerRecord{
- ID: w.ID(),
- Labels: w.Labels(),
- Platforms: pb.PlatformsFromSpec(w.Platforms(true)),
- GCPolicy: toPBGCPolicy(w.GCPolicy()),
+ ID: w.ID(),
+ Labels: w.Labels(),
+ Platforms: pb.PlatformsFromSpec(w.Platforms(true)),
+ GCPolicy: toPBGCPolicy(w.GCPolicy()),
+ BuildkitVersion: toPBBuildkitVersion(w.BuildkitVersion()),
})
}
return resp, nil
}
+func (c *Controller) Info(ctx context.Context, r *controlapi.InfoRequest) (*controlapi.InfoResponse, error) {
+ return &controlapi.InfoResponse{
+ BuildkitVersion: &apitypes.BuildkitVersion{
+ Package: version.Package,
+ Version: version.Version,
+ Revision: version.Revision,
+ },
+ }, nil
+}
+
func (c *Controller) gc() {
c.gcmu.Lock()
defer c.gcmu.Unlock()
@@ -488,6 +576,14 @@ func parseCacheExportMode(mode string) (solver.CacheExportMode, bool) {
return solver.CacheExportModeMin, false
}
+func parseCacheExportIgnoreError(ignoreErrorStr string) (bool, bool) {
+ ignoreError, err := strconv.ParseBool(ignoreErrorStr)
+ if err != nil {
+ return false, false
+ }
+ return ignoreError, true
+}
+
func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy {
policy := make([]*apitypes.GCPolicy, 0, len(in))
for _, p := range in {
@@ -500,3 +596,76 @@ func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy {
}
return policy
}
+
+func toPBBuildkitVersion(in client.BuildkitVersion) *apitypes.BuildkitVersion {
+ return &apitypes.BuildkitVersion{
+ Package: in.Package,
+ Version: in.Version,
+ Revision: in.Revision,
+ }
+}
+
+func findDuplicateCacheOptions(cacheOpts []*controlapi.CacheOptionsEntry) ([]*controlapi.CacheOptionsEntry, error) {
+ seen := map[string]*controlapi.CacheOptionsEntry{}
+ duplicate := map[string]struct{}{}
+ for _, opt := range cacheOpts {
+ k, err := cacheOptKey(*opt)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := seen[k]; ok {
+ duplicate[k] = struct{}{}
+ }
+ seen[k] = opt
+ }
+
+ var duplicates []*controlapi.CacheOptionsEntry
+ for k := range duplicate {
+ duplicates = append(duplicates, seen[k])
+ }
+ return duplicates, nil
+}
+
+func cacheOptKey(opt controlapi.CacheOptionsEntry) (string, error) {
+ if opt.Type == "registry" && opt.Attrs["ref"] != "" {
+ return opt.Attrs["ref"], nil
+ }
+ var rawOpt = struct {
+ Type string
+ Attrs map[string]string
+ }{
+ Type: opt.Type,
+ Attrs: opt.Attrs,
+ }
+ hash, err := hashstructure.Hash(rawOpt, hashstructure.FormatV2, nil)
+ if err != nil {
+ return "", err
+ }
+ return fmt.Sprint(opt.Type, ":", hash), nil
+}
+
+type roContentStore struct {
+ content.Store
+}
+
+func (cs *roContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
+ return nil, errors.Errorf("read-only content store")
+}
+
+func (cs *roContentStore) Delete(ctx context.Context, dgst digest.Digest) error {
+ return errors.Errorf("read-only content store")
+}
+
+func (cs *roContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+ return content.Info{}, errors.Errorf("read-only content store")
+}
+
+func (cs *roContentStore) Abort(ctx context.Context, ref string) error {
+ return errors.Errorf("read-only content store")
+}
+
+const timestampKey = "buildkit-current-timestamp"
+
+func sendTimestampHeader(srv grpc.ServerStream) error {
+ return srv.SendHeader(metadata.Pairs(timestampKey, time.Now().Format(time.RFC3339Nano)))
+}
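
The new duplicate-cache-export check keys registry entries by their `ref` attribute and hashes everything else. A small sketch of that keying, using `github.com/mitchellh/hashstructure/v2` as the patch does (the struct here mirrors only the two fields the hash covers):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure/v2"
)

// cacheKey reproduces the keying logic above: registry exports with a ref
// collapse to the ref itself, anything else to a type-prefixed hash of its
// attributes, so repeated --export-cache entries can be reported as duplicates.
func cacheKey(typ string, attrs map[string]string) (string, error) {
	if typ == "registry" && attrs["ref"] != "" {
		return attrs["ref"], nil
	}
	raw := struct {
		Type  string
		Attrs map[string]string
	}{Type: typ, Attrs: attrs}
	h, err := hashstructure.Hash(raw, hashstructure.FormatV2, nil)
	if err != nil {
		return "", err
	}
	return fmt.Sprint(typ, ":", h), nil
}

func main() {
	a, _ := cacheKey("registry", map[string]string{"ref": "example.com/cache:latest"})
	b, _ := cacheKey("local", map[string]string{"dest": "/tmp/cache"})
	fmt.Println(a)
	fmt.Println(b)
}
```
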
diff --git a/control/control_test.go b/control/control_test.go
new file mode 100644
index 000000000000..8707287e277a
--- /dev/null
+++ b/control/control_test.go
@@ -0,0 +1,148 @@
+package control
+
+import (
+ "testing"
+
+ controlapi "github.com/moby/buildkit/api/services/control"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDuplicateCacheOptions(t *testing.T) {
+ var testCases = []struct {
+ name string
+ opts []*controlapi.CacheOptionsEntry
+ expected []*controlapi.CacheOptionsEntry
+ }{
+ {
+ name: "avoids unique opts",
+ opts: []*controlapi.CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": "example.com/ref:v1.0.0",
+ },
+ },
+ {
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": "/path/for/export",
+ },
+ },
+ },
+ expected: nil,
+ },
+ {
+ name: "finds duplicate opts",
+ opts: []*controlapi.CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": "example.com/ref:v1.0.0",
+ },
+ },
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": "example.com/ref:v1.0.0",
+ },
+ },
+ {
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": "/path/for/export",
+ },
+ },
+ {
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": "/path/for/export",
+ },
+ },
+ },
+ expected: []*controlapi.CacheOptionsEntry{
+ {
+ Type: "registry",
+ Attrs: map[string]string{
+ "ref": "example.com/ref:v1.0.0",
+ },
+ },
+ {
+ Type: "local",
+ Attrs: map[string]string{
+ "dest": "/path/for/export",
+ },
+ },
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ result, err := findDuplicateCacheOptions(tc.opts)
+ require.NoError(t, err)
+ require.ElementsMatch(t, tc.expected, result)
+ })
+ }
+}
+
+func TestParseCacheExportIgnoreError(t *testing.T) {
+ tests := map[string]struct {
+ expectedIgnoreError bool
+ expectedSupported bool
+ }{
+ "": {
+ expectedIgnoreError: false,
+ expectedSupported: false,
+ },
+ ".": {
+ expectedIgnoreError: false,
+ expectedSupported: false,
+ },
+ "fake": {
+ expectedIgnoreError: false,
+ expectedSupported: false,
+ },
+ "true": {
+ expectedIgnoreError: true,
+ expectedSupported: true,
+ },
+ "True": {
+ expectedIgnoreError: true,
+ expectedSupported: true,
+ },
+ "TRUE": {
+ expectedIgnoreError: true,
+ expectedSupported: true,
+ },
+ "truee": {
+ expectedIgnoreError: false,
+ expectedSupported: false,
+ },
+ "false": {
+ expectedIgnoreError: false,
+ expectedSupported: true,
+ },
+ "False": {
+ expectedIgnoreError: false,
+ expectedSupported: true,
+ },
+ "FALSE": {
+ expectedIgnoreError: false,
+ expectedSupported: true,
+ },
+ "ffalse": {
+ expectedIgnoreError: false,
+ expectedSupported: false,
+ },
+ }
+
+ for ignoreErrStr, test := range tests {
+ t.Run(ignoreErrStr, func(t *testing.T) {
+ ignoreErr, supported := parseCacheExportIgnoreError(ignoreErrStr)
+ t.Log("checking expectedIgnoreError")
+ require.Equal(t, ignoreErr, test.expectedIgnoreError)
+ t.Log("checking expectedSupported")
+ require.Equal(t, supported, test.expectedSupported)
+ })
+ }
+}
diff --git a/control/gateway/gateway.go b/control/gateway/gateway.go
index 62c696d6c448..4451e022d322 100644
--- a/control/gateway/gateway.go
+++ b/control/gateway/gateway.go
@@ -111,6 +111,14 @@ func (gwf *GatewayForwarder) ReadFile(ctx context.Context, req *gwapi.ReadFileRe
return fwd.ReadFile(ctx, req)
}
+func (gwf *GatewayForwarder) Evaluate(ctx context.Context, req *gwapi.EvaluateRequest) (*gwapi.EvaluateResponse, error) {
+ fwd, err := gwf.lookupForwarder(ctx)
+ if err != nil {
+ return nil, errors.Wrap(err, "forwarding Evaluate")
+ }
+ return fwd.Evaluate(ctx, req)
+}
+
func (gwf *GatewayForwarder) Ping(ctx context.Context, req *gwapi.PingRequest) (*gwapi.PongResponse, error) {
fwd, err := gwf.lookupForwarder(ctx)
if err != nil {
diff --git a/control/init.go b/control/init.go
deleted file mode 100644
index 2e86133e4120..000000000000
--- a/control/init.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package control
-
-import controlapi "github.com/moby/buildkit/api/services/control"
-
-var emptyLogVertexSize int
-
-func init() {
- emptyLogVertex := controlapi.VertexLog{}
- emptyLogVertexSize = emptyLogVertex.Size()
-}
diff --git a/docs/annotations.md b/docs/annotations.md
new file mode 100644
index 000000000000..a37573eeef9a
--- /dev/null
+++ b/docs/annotations.md
@@ -0,0 +1,59 @@
+# Image annotations
+
+Buildkit supports attaching [OCI annotations](https://github.com/opencontainers/image-spec/blob/main/annotations.md)
+to its built image manifests and indexes. These annotations can be used to
+attach additional metadata to a built image, which may not be appropriate to
+store in the image content itself.
+
+Annotations are similar to, but not a replacement for, image labels. Annotations
+can be attached at almost every level of the resulting image output, while
+labels can only be included in the image configuration object. Additionally,
+unless overridden, image labels are inherited by other images that use the
+image as a base.
+
+You can use any of the pre-defined annotation keys, or create your own.
+
+To build an image with annotations, you can use the `image` or `oci` (and
+related) exporter types, along with the `annotation.*` option.
+
+For example, to attach a human-readable title to your image, you can use the
+following buildctl invocation:
+
+ buildctl build ... \
+ --opt platform=amd64,arm64 \
+ --output "type=image,name=target,annotation.org.opencontainers.image.title=Target"
+
+This annotation will be added to each built image manifest, so each platform
+you built for (in the above, `amd64` and `arm64`) will get a copy of the annotation.
+
+If you want different annotations for different platforms, e.g. a different
+documentation URL per manifest, you can use platform-specific annotations with
+the `annotation[<platform>].*` syntax like so:
+
+ buildctl build ... \
+ --opt platform=amd64,arm64 \
+ --output "type=image,name=target,annotation[linux/amd64].org.opencontainers.image.url=https://example.com/amd64,annotation[linux/arm64].org.opencontainers.image.url=https://example.com/arm64"
+
+BuildKit also lets you finely control the exact destination that the
+annotation is written to, using the `annotation-<type>.*` syntax. You can
+write to the following `<type>`s:
+
+- The `manifest` (the default, as above)
+- The `manifest-descriptor`
+ - This adds the annotation into the image index's descriptor for the manifest
+ - (discarded if the output does not contain an image index)
+- The `index`
+ - This adds the annotation into the image index root
+ - (discarded if the output does not contain an image index)
+- The `index-descriptor`
+ - This adds the annotation into the OCI layout's descriptor for the index
+ - (discarded if the output does not contain an OCI layout)
+
+For example, if you want to add the annotation at the image index level, so
+that the annotation is shared between all architectures, you can instead:
+
+ buildctl build ... \
+ --opt platform=amd64,arm64 \
+ --output "type=image,name=target,annotation-index.org.opencontainers.image.title=Target Image"
diff --git a/docs/attestations/README.md b/docs/attestations/README.md
new file mode 100644
index 000000000000..b4e4c1e76449
--- /dev/null
+++ b/docs/attestations/README.md
@@ -0,0 +1,16 @@
+# Attestations
+
+BuildKit supports creating and attaching attestations to build artifacts.
+Generated attestations use the [in-toto attestation format](https://github.com/in-toto/attestation).
+
+The currently supported attestation types are:
+
+- [SBOMs](./sbom.md)
+- [SLSA Provenance](./slsa-provenance.md)
+
+Upon generation, attestations are attached differently to the export result:
+
+- For the `image`, `oci` and `docker` exporters, attestations are exported
+ using the attached [attestation storage](./attestation-storage.md).
+- For the `local` and `tar` exporters, attestations are written to separate
+ files within the output directory.
diff --git a/docs/attestations/attestation-storage.md b/docs/attestations/attestation-storage.md
new file mode 100644
index 000000000000..2df1d3668262
--- /dev/null
+++ b/docs/attestations/attestation-storage.md
@@ -0,0 +1,215 @@
+# Image Attestation Storage
+
+Buildkit supports creating and attaching attestations to build artifacts. These
+attestations can provide valuable information from the build process,
+including, but not limited to: [SBOMs](https://en.wikipedia.org/wiki/Software_supply_chain),
+[SLSA Provenance](https://slsa.dev/provenance), build logs, etc.
+
+This document describes the current custom format used to store attestations,
+which is designed to be compatible with current registry implementations today.
+In the future, we may support exporting attestations in additional formats.
+
+Attestations are stored as manifest objects in the image index, similar in
+style to OCI artifacts.
+
+## Properties
+
+### Attestation Manifest
+
+Attestation manifests are attached to the root image index object, under a
+separate [OCI image manifest](https://github.com/opencontainers/image-spec/blob/main/manifest.md).
+Each attestation manifest can contain multiple [attestation blobs](#attestation-blob),
+with all of the attestations in a manifest applying to a single platform
+manifest. All properties of standard OCI and Docker manifests continue to
+apply.
+
+The image `config` descriptor will point to a valid [image config](https://github.com/opencontainers/image-spec/blob/main/config.md);
+however, it will not contain attestation-specific details, and should be
+ignored as it is only included for compatibility purposes.
+
+Each image layer in `layers` will contain a descriptor for a single
+[attestation blob](#attestation-blob). The `mediaType` of each layer will be
+set in accordance with its contents, one of:
+
+- `application/vnd.in-toto+json` (currently, the only supported option)
+
+ Indicates an in-toto attestation blob
+
+Any unknown `mediaType`s should be ignored.
+
+To assist attestation traversal, the following annotations may be set on each
+layer descriptor:
+
+- `in-toto.io/predicate-type`
+
+ This annotation will be set if the enclosed attestation is an in-toto
+ attestation (currently, the only supported option). The annotation will
+ be set to contain the same value as the `predicateType` property present
+ inside the attestation.
+
+  When present, this annotation may be used by consumers to find the specific
+  attestation(s) they are looking for, without pulling the contents of the others.
+
+### Attestation Blob
+
+The contents of each layer will be a blob dependent on its `mediaType`.
+
+- `application/vnd.in-toto+json`
+
+ The blob contents will contain a full [in-toto attestation statement](https://github.com/in-toto/attestation/blob/main/spec/README.md#statement):
+
+ ```json
+ {
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "subject": [
+ {
+        "name": "<NAME>",
+        "digest": {"<ALGORITHM>": "<HEX_VALUE>"}
+ },
+ ...
+ ],
+    "predicateType": "<URI>",
+ "predicate": { ... }
+ }
+ ```
+
+ The subject of the attestation should be set to be the same digest as the
+ target manifest described in the [Attestation Manifest Descriptor](#attestation-manifest-descriptor),
+ or some object within.
+
+### Attestation Manifest Descriptor
+
+Attestation manifests are attached to the root [image index](https://github.com/opencontainers/image-spec/blob/main/image-index.md),
+in the `manifests` key, after all the original runnable manifests. All
+properties of standard OCI and Docker manifest descriptors continue to apply.
+
+To prevent container runtimes from accidentally pulling or running the image
+described in the manifest, the `platform` property of the attestation manifest
+will be set to `unknown/unknown`, as follows:
+
+```json
+"platform": {
+ "architecture": "unknown",
+ "os": "unknown"
+}
+```
+
+To assist index traversal, the following annotations will be set on the
+attestation manifest descriptor:
+
+- `vnd.docker.reference.type`
+
+ This annotation describes the type of the artifact, and will be set
+ to `attestation-manifest`. If any other value is specified, the entire
+ manifest should be ignored.
+
+- `vnd.docker.reference.digest`
+
+ This annotation will contain the digest of the object in the image index that
+ the attestation manifest refers to.
+
+ When present, this annotation can be used to find the matching attestation
+ manifest for a selected image manifest.
+
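+For example, a consumer that has fetched the image index could use these
+annotations to locate the attestation manifest for a chosen platform manifest.
+The sketch below uses `jq` (not part of BuildKit); `index.json` and the digest
+value are illustrative, taken from the example index further down:
+
+```bash
+# Find the attestation manifest that refers to a given platform manifest digest.
+TARGET="sha256:23678f31b3b3586c4fb318aecfe64a96a1f0916ba8faf9b2be2abee63fa9e827"
+jq --arg target "$TARGET" '
+  .manifests[]
+  | select(.annotations["vnd.docker.reference.type"] == "attestation-manifest")
+  | select(.annotations["vnd.docker.reference.digest"] == $target)
+' index.json
+```
+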
+## Examples
+
+*Example showing an SBOM attestation attached to a `linux/amd64` image*
+
+#### Image index (`sha256:94acc2ca70c40f3f6291681f37ce9c767e3d251ce01c7e4e9b98ccf148c26260`):
+
+This image index defines two descriptors: an AMD64 image `sha256:23678f31..` and an attestation manifest `sha256:02cb9aa7..` for that image.
+
+```json
+{
+ "mediaType": "application/vnd.oci.image.index.v1+json",
+ "schemaVersion": 2,
+ "manifests": [
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:23678f31b3b3586c4fb318aecfe64a96a1f0916ba8faf9b2be2abee63fa9e827",
+ "size": 1234,
+ "platform": {
+ "architecture": "amd64",
+ "os": "linux"
+ }
+ },
+ {
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "digest": "sha256:02cb9aa7600e73fcf41ee9f0f19cc03122b2d8be43d41ce4b21335118f5dd943",
+ "size": 1234,
+ "annotations": {
+ "vnd.docker.reference.digest": "sha256:23678f31b3b3586c4fb318aecfe64a96a1f0916ba8faf9b2be2abee63fa9e827",
+ "vnd.docker.reference.type": "attestation-manifest"
+ },
+ "platform": {
+ "architecture": "unknown",
+ "os": "unknown"
+ }
+ }
+ ]
+}
+```
+
+#### Attestation manifest (`sha256:02cb9aa7600e73fcf41ee9f0f19cc03122b2d8be43d41ce4b21335118f5dd943`):
+
+This attestation manifest contains a single in-toto attestation with an "https://spdx.dev/Document" predicate, signifying that it defines an SBOM for the image.
+
+```json
+{
+ "mediaType": "application/vnd.oci.image.manifest.v1+json",
+ "schemaVersion": 2,
+ "config": {
+ "mediaType": "application/vnd.oci.image.config.v1+json",
+ "digest": "sha256:a781560066f20ec9c28f2115a95a886e5e71c7c7aa9d8fd680678498b82f3ea3",
+ "size": 123
+ },
+ "layers": [
+ {
+ "mediaType": "application/vnd.in-toto+json",
+ "digest": "sha256:133ae3f9bcc385295b66c2d83b28c25a9f294ce20954d5cf922dda860429734a",
+ "size": 1234,
+ "annotations": {
+ "in-toto.io/predicate-type": "https://spdx.dev/Document"
+ }
+ }
+ ]
+}
+```
+
+#### Image config (`sha256:a781560066f20ec9c28f2115a95a886e5e71c7c7aa9d8fd680678498b82f3ea3`):
+
+```json
+{
+ "architecture": "unknown",
+ "os": "unknown",
+ "config": {},
+ "rootfs": {
+ "type": "layers",
+ "diff_ids": [
+ "sha256:133ae3f9bcc385295b66c2d83b28c25a9f294ce20954d5cf922dda860429734a"
+ ]
+ }
+}
+```
+
+#### Layer content (`sha256:1ea07d5e55eb47ad0e6bbfa2ec180fb580974411e623814e519064c88f022f5c`):
+
+Attestation body containing the SBOM data listing the packages used during the build in SPDX format.
+
+```json
+{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://spdx.dev/Document",
+ "subject": [
+ {
+ "name": "_",
+ "digest": {
+ "sha256": "23678f31b3b3586c4fb318aecfe64a96a1f0916ba8faf9b2be2abee63fa9e827"
+ }
+ }
+ ],
+ "predicate": {
+ "SPDXID": "SPDXRef-DOCUMENT",
+ "spdxVersion": "SPDX-2.2",
+ ...
+```
diff --git a/docs/attestations/sbom-protocol.md b/docs/attestations/sbom-protocol.md
new file mode 100644
index 000000000000..1505fbfffcca
--- /dev/null
+++ b/docs/attestations/sbom-protocol.md
@@ -0,0 +1,72 @@
+# SBOM Scanning Protocol
+
+BuildKit supports automatic creation of [SBOMs](https://en.wikipedia.org/wiki/Software_supply_chain)
+for builds, attaching them as [image attestations](./attestation-storage.md).
+
+To scan the filesystem contents, a user can specify an SBOM generator image.
+When run, this image is passed the rootfs of the build stage as a read-only
+mount, and writes its SBOM scan data to a specified directory.
+
+The SBOM generator image is expected to follow the rules of the BuildKit SBOM
+generator protocol, defined in this document.
+
+> **Note**
+>
+> Currently, only SBOMs in the [SPDX](https://spdx.dev) JSON format are
+> supported.
+>
+> These SBOMs will be attached to the final image as an in-toto attestation
+> with the `https://spdx.dev/Document` predicate type.
+
+## Implementations
+
+The following SBOM generator images are available:
+
+- [docker/buildkit-syft-scanner](https://github.com/docker/buildkit-syft-scanner)
+
+## Parameters
+
+A single run of a generator may specify multiple target filesystems to scan by
+passing multiple paths - the scanner should scan all of them. Each filesystem
+target has a **name**, specified by the final component of the path for that
+target. A generator may produce any number of scans for the available targets -
+though ideally it should aim to produce a single scan per target.
+
+These parameters will be passed to the generator image as environment variables
+by BuildKit:
+
+- `BUILDKIT_SCAN_DESTINATION` (required)
+
+ This variable specifies the directory where the scanner should write its
+  SBOM data. Scanners should write their SBOMs to `$BUILDKIT_SCAN_DESTINATION/<scan>.spdx.json`
+  where `<scan>` is the name of an arbitrary scan. A scanner may produce
+ multiple scans for a single target - scan names must be unique within a
+ target, but should not be considered significant by producers or consumers.
+
+- `BUILDKIT_SCAN_SOURCE` (required)
+
+ This variable specifies the main target, passing the path to the root
+ filesystem of the final build result.
+
+ The scanner should scan this filesystem, and write its SBOM result to
+ `$BUILDKIT_SCAN_DESTINATION/$(basename $BUILDKIT_SCAN_SOURCE).spdx.json`.
+
+- `BUILDKIT_SCAN_SOURCE_EXTRAS` (optional)
+
+ This variable specifies additional targets, passing the path to a directory
+ of other root filesystems. If the variable is not set, is empty, or contains
+ a directory that does not exist, then no extras should be scanned.
+
+ The scanner should iterate through this directory, and write its SBOM scans
+  to `$BUILDKIT_SCAN_DESTINATION/<scan>.spdx.json`, similar to above.
+
+A scanner must not error if optional parameters are not set.
+
+The scanner should produce SBOM results for all filesystems specified in
+`BUILDKIT_SCAN_SOURCE` or `BUILDKIT_SCAN_SOURCE_EXTRAS` but must not produce
+SBOM results for any other filesystems.
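+
+As an illustration only, a minimal generator entrypoint could look like the
+sketch below. The `generate-spdx` command is hypothetical and stands in for a
+real SBOM tool; the environment-variable handling follows the protocol above.
+
+```bash
+#!/bin/sh
+# Minimal sketch of a generator entrypoint.
+set -eu
+
+scan() {
+  target="$1"
+  name="$(basename "$target")"
+  # One scan per target; the scan name here simply reuses the target name.
+  generate-spdx "$target" > "$BUILDKIT_SCAN_DESTINATION/$name.spdx.json"
+}
+
+# Required: scan the main target.
+scan "$BUILDKIT_SCAN_SOURCE"
+
+# Optional: scan any extra targets, without failing if none were provided.
+if [ -n "${BUILDKIT_SCAN_SOURCE_EXTRAS:-}" ] && [ -d "$BUILDKIT_SCAN_SOURCE_EXTRAS" ]; then
+  for extra in "$BUILDKIT_SCAN_SOURCE_EXTRAS"/*; do
+    [ -e "$extra" ] || continue
+    scan "$extra"
+  done
+fi
+```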
+
+## Further reading
+
+See [frontend/attest/sbom.go](https://github.com/moby/buildkit/blob/master/frontend/attest/sbom.go)
+for the code that invokes the user-specified generator.
diff --git a/docs/attestations/sbom.md b/docs/attestations/sbom.md
new file mode 100644
index 000000000000..9cb008775a36
--- /dev/null
+++ b/docs/attestations/sbom.md
@@ -0,0 +1,198 @@
+# SBOMs
+
+BuildKit supports automatic creation of [SBOMs](https://en.wikipedia.org/wiki/Software_supply_chain)
+to record the software components that make up the final image. These consist
+of a list of software packages and the files that they own.
+
+They also usually contain metadata about each component, such as software
+licenses, authors, and unique package identifiers which can be used for
+vulnerability scanning.
+
+All SBOMs generated by BuildKit are wrapped inside [in-toto attestations](https://github.com/in-toto/attestation)
+in the [SPDX](https://spdx.dev) JSON format. They can be generated using
+generator images that follow the [SBOM generator protocol](./sbom-protocol.md).
+
+When the final output format is a container image, these SBOMs are attached
+using the [attestation storage](./attestation-storage.md).
+
+To build an image with an attached SBOM (derived using the builtin default scanner,
+[docker/buildkit-syft-scanner](https://github.com/docker/buildkit-syft-scanner)),
+use the `attest:sbom` option:
+
+```bash
+buildctl build \
+ --frontend=dockerfile.v0 \
+ --local context=. \
+ --local dockerfile=. \
+ --opt attest:sbom=
+```
+
+You can also specify a custom SBOM generator image:
+
+```bash
+buildctl build \
+ --frontend=dockerfile.v0 \
+ --local context=. \
+ --local dockerfile=. \
+  --opt attest:sbom=generator=<registry>/<image>
+```
+
+## Dockerfile configuration
+
+By default, only the final build result is scanned - because of this, the
+resulting SBOM will not include build-time dependencies that may be installed
+in separate stages or the build context. This could cause you to miss
+vulnerabilities in those dependencies, which could impact the security of your
+final build artifacts.
+
+To include these build-time dependencies from your Dockerfile, you can set the
+build arguments `BUILDKIT_SBOM_SCAN_CONTEXT` and `BUILDKIT_SBOM_SCAN_STAGE` to
+additionally scan the build context and other build stages respectively. These
+build arguments are special values, and cannot be used for variable
+substitutions or as environment variables from within the Dockerfile, as they
+exist solely to change the behavior of the scanner.
+
+Both arguments can be set as global meta arguments (before a `FROM`) or can be
+individually set in each stage. If set globally, the value is propagated for
+each stage in the Dockerfile. They can take the following values:
+
+- `true`: enables context/stage scanning (e.g. `BUILDKIT_SBOM_SCAN_STAGE=true`)
+- `false`: disables context/stage scanning (e.g. `BUILDKIT_SBOM_SCAN_STAGE=false`)
+- `<stage>[,<stage>]`: enables context/stage scanning for all stages listed in
+  the comma-separated list of provided stages (e.g.
+  `BUILDKIT_SBOM_SCAN_STAGE=x,y` will scan stages called `x` and `y`).
+
+Scanning will *never* be enabled for a stage that is not built, even if it is
+enabled via the build arguments.
+
+For example:
+
+```dockerfile
+FROM alpine:latest as build
+# enable scanning for the intermediate build stage
+ARG BUILDKIT_SBOM_SCAN_STAGE=true
+WORKDIR /src
+COPY . .
+RUN ... # build some software
+
+FROM scratch as final
+# scan the build context only if the build is run to completion
+ARG BUILDKIT_SBOM_SCAN_CONTEXT=true
+COPY --from=build /path/to/software /path/to/software
+```
+
+You can also directly override these `ARG`s on the command line, by passing
+them as build arguments:
+
+```bash
+buildctl build \
+ --frontend=dockerfile.v0 \
+ --local context=. \
+ --local dockerfile=. \
+  --opt build-arg:BUILDKIT_SBOM_SCAN_STAGE=<bool> \
+  --opt build-arg:BUILDKIT_SBOM_SCAN_CONTEXT=<bool> \
+ --opt attest:sbom=
+```
+
+Overriding from the command line only affects `ARG`s already declared in the
+Dockerfile; it cannot enable scanning for stages that do not declare a
+`BUILDKIT_SBOM_SCAN` argument.
+
+## Output
+
+To inspect the SBOMs that were generated and attached to a container image,
+you can use the `docker buildx imagetools` command to explore the resulting
+image in your registry, following the format described in the [attestation storage](./attestation-storage.md).
+
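+A quick way to dump the raw image index, which lists the attestation
+manifests, is `docker buildx imagetools inspect` with its `--raw` flag (the
+image name below is illustrative):
+
+```bash
+docker buildx imagetools inspect --raw docker.io/username/image:latest
+```
+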
+For example, for a simple Docker image based on `alpine:latest`, we might get
+the following SBOM:
+
+```json
+{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://spdx.dev/Document",
+ "subject": [
+ {
+      "name": "pkg:docker/<registry>/<image>@<tag/digest>?platform=<platform>",
+ "digest": {
+ "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862"
+ }
+ }
+ ],
+ "predicate": {
+ "SPDXID": "SPDXRef-DOCUMENT",
+ "name": "/run/src/core",
+ "spdxVersion": "SPDX-2.2",
+ "creationInfo": {
+ "created": "2022-11-09T10:12:01.338817553Z",
+ "creators": [
+ "Organization: Anchore, Inc",
+ "Tool: syft-[not provided]"
+ ],
+ "licenseListVersion": "3.18"
+ },
+ "dataLicense": "CC0-1.0",
+ "documentNamespace": "https://anchore.com/syft/dir/run/src/core-4006bb64-24b1-4a22-a18f-94efc6b90edb",
+ "files": [
+ {
+ "SPDXID": "SPDXRef-1ac501c94e2f9f81",
+ "comment": "layerID: sha256:9b18e9b68314027565b90ff6189d65942c0f7986da80df008b8431276885218e",
+ "fileName": "/bin/busybox",
+ "licenseConcluded": "NOASSERTION"
+ },
+ ...
+ ],
+ "packages": [
+ {
+ "SPDXID": "SPDXRef-980737451f148c56",
+ "description": "Size optimized toolbox of many common UNIX utilities",
+ "downloadLocation": "https://busybox.net/",
+ "externalRefs": [
+ {
+ "referenceCategory": "SECURITY",
+ "referenceLocator": "cpe:2.3:a:busybox:busybox:1.35.0-r17:*:*:*:*:*:*:*",
+ "referenceType": "cpe23Type"
+ },
+ {
+ "referenceCategory": "PACKAGE_MANAGER",
+ "referenceLocator": "pkg:alpine/busybox@1.35.0-r17?arch=aarch64&upstream=busybox&distro=alpine-3.16.2",
+ "referenceType": "purl"
+ }
+ ],
+ "filesAnalyzed": false,
+ "hasFiles": [
+ "SPDXRef-1ac501c94e2f9f81",
+ ...
+ ],
+ "licenseConcluded": "GPL-2.0-only",
+ "licenseDeclared": "GPL-2.0-only",
+ "name": "busybox",
+ "originator": "Person: Sören Tempel ",
+ "sourceInfo": "acquired package info from APK DB: lib/apk/db/installed",
+ "versionInfo": "1.35.0-r17"
+ },
+ ...
+ ],
+ "relationships": [
+ {
+ "relatedSpdxElement": "SPDXRef-1ac501c94e2f9f81",
+ "relationshipType": "CONTAINS",
+ "spdxElementId": "SPDXRef-980737451f148c56"
+ },
+ ...
+ ]
+ }
+}
+```
+
+The exact output will depend on the generator you use; however, generally:
+
+- The `files` key will contain a list of all files in the image.
+- The `packages` key will contain a list of all packages discovered from the
+ image.
+- The `relationships` key links together various files and packages, together
+ with metadata about how they relate to each other.
+
+Entries in the `files` and `packages` lists will contain a `comment` field
+with the `sha256` digest of the layer that introduced them, if that layer is
+present in the final image.
diff --git a/docs/attestations/slsa-definitions.md b/docs/attestations/slsa-definitions.md
new file mode 100644
index 000000000000..46e198efc1b2
--- /dev/null
+++ b/docs/attestations/slsa-definitions.md
@@ -0,0 +1,589 @@
+# SLSA definitions
+
+BuildKit supports the [creation of SLSA Provenance](./slsa-provenance.md) for builds that
+it runs.
+
+The provenance format generated by BuildKit is defined by the
+[SLSA Provenance format](https://slsa.dev/provenance/v0.2).
+
+This page describes how BuildKit populates each field, and whether the field
+gets included when you generate attestations with `mode=min` or `mode=max`.
+
+## `builder.id` [(SLSA)](https://slsa.dev/provenance/v0.2#builder.id)
+
+Included with `mode=min` and `mode=max`.
+
+The `builder.id` field is set to the URL of the build, if available.
+
+```json
+ "builder": {
+ "id": "https://github.com/docker/buildx/actions/runs/3709599520"
+ },
+```
+
+This value can be set using the `builder-id` attestation parameter.
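+
+For example, with `buildctl` the value could be set explicitly through the
+provenance attestation options (the URL is illustrative):
+
+```bash
+buildctl build \
+  --frontend=dockerfile.v0 \
+  --local context=. \
+  --local dockerfile=. \
+  --opt attest:provenance=builder-id=https://ci.example.com/builds/123
+```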
+
+## `buildType` [(SLSA)](https://slsa.dev/provenance/v0.2#buildType)
+
+Included with `mode=min` and `mode=max`.
+
+The `buildType` field is set to `https://mobyproject.org/buildkit@v1` and can
+be used to determine the structure of the provenance content.
+
+```json
+ "buildType": "https://mobyproject.org/buildkit@v1",
+```
+
+## `invocation.configSource` [(SLSA)](https://slsa.dev/provenance/v0.2#invocation.configSource)
+
+Included with `mode=min` and `mode=max`.
+
+Describes the config that initialized the build.
+
+```json
+ "invocation": {
+ "configSource": {
+ "uri": "https://github.com/moby/buildkit.git#refs/tags/v0.11.0",
+ "digest": {
+ "sha1": "4b220de5058abfd01ff619c9d2ff6b09a049bea0"
+ },
+ "entryPoint": "Dockerfile"
+ },
+ ...
+ },
+```
+
+For builds initialized from a remote context, like a Git or HTTP URL, this
+object defines the context URL and its immutable digest in the `uri` and `digest` fields.
+For builds using a local frontend file, such as a Dockerfile, the `entryPoint` field defines the path
+for the frontend file that initialized the build (`filename` frontend option).
+
+## `invocation.parameters` [(SLSA)](https://slsa.dev/provenance/v0.2#invocation.parameters)
+
+Partially included with `mode=min`.
+
+Describes build inputs passed to the build.
+
+```json
+ "invocation": {
+ "parameters": {
+ "frontend": "gateway.v0",
+ "args": {
+ "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR": "1",
+ "label:FOO": "bar",
+ "source": "docker/dockerfile-upstream:master",
+ "target": "release"
+ },
+ "secrets": [
+ {
+ "id": "GIT_AUTH_HEADER",
+ "optional": true
+ },
+ ...
+ ],
+ "ssh": [],
+ "locals": []
+ },
+ ...
+ },
+```
+
+The following fields are included with both `mode=min` and `mode=max`:
+
+- `locals` lists any local sources used in the build, including the build
+ context and frontend file.
+- `frontend` defines the type of BuildKit frontend used for the build. Currently,
+ this can be `dockerfile.v0` or `gateway.v0`.
+- `args` defines the build arguments passed to the BuildKit frontend.
+
+ The keys inside the `args` object reflect the options as BuildKit receives
+ them. For example, `build-arg` and `label` prefixes are used for build
+ arguments and labels, and `target` key defines the target stage that was
+ built. The `source` key defines the source image for the Gateway frontend, if
+ used.
+
+The following fields are only included with `mode=max`:
+
+- `secrets` defines secrets used during the build. Note that actual secret
+ values are not included.
+- `ssh` defines the ssh forwards used during the build.
+
+## `invocation.environment` [(SLSA)](https://slsa.dev/provenance/v0.2#invocation.environment)
+
+Included with `mode=min` and `mode=max`.
+
+```json
+ "invocation": {
+ "environment": {
+ "platform": "linux/amd64"
+ },
+ ...
+ },
+```
+
+The only value BuildKit currently sets is the `platform` of the current build
+machine. Note that this is not necessarily the platform of the build result,
+which can be determined from the in-toto `subject` field.
+
+## `materials` [(SLSA)](https://slsa.dev/provenance/v0.2#materials)
+
+Included with `mode=min` and `mode=max`.
+
+Defines all the external artifacts that were part of the build. The value
+depends on the type of artifact:
+
+- The URL of Git repositories containing source code for the image
+- HTTP URLs, if you are building from a remote tarball or one that was included
+  using an `ADD` command in the Dockerfile
+- Any Docker images used during the build
+
+The URLs to the Docker images will be in
+[Package URL](https://github.com/package-url/purl-spec) format.
+
+All the build materials will include the immutable checksum of the artifact.
+When building from a mutable tag, you can use the digest information to
+determine if the artifact has been updated compared to when the build ran.
+
+```json
+ "materials": [
+ {
+ "uri": "pkg:docker/alpine@3.17?platform=linux%2Famd64",
+ "digest": {
+ "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4"
+ }
+ },
+ {
+ "uri": "https://github.com/moby/buildkit.git#refs/tags/v0.11.0",
+ "digest": {
+ "sha1": "4b220de5058abfd01ff619c9d2ff6b09a049bea0"
+ }
+ },
+ ...
+ ],
+```
+
+## `buildConfig` [(SLSA)](https://slsa.dev/provenance/v0.2#buildConfig)
+
+Only included with `mode=max`.
+
+Defines the build steps performed during the build.
+
+BuildKit internally uses an LLB definition to execute the build steps. The LLB
+definition of the build steps is stored in the `buildConfig.llbDefinition` field.
+
+Each LLB step is the JSON definition of the
+[LLB ProtoBuf API](https://github.com/moby/buildkit/blob/v0.10.0/solver/pb/ops.proto).
+The dependencies for a vertex in the LLB graph can be found in the `inputs`
+field for every step.
+
+```json
+ "buildConfig": {
+ "llbDefinition": [
+ {
+ "id": "step0",
+ "op": {
+ "Op": {
+ "exec": {
+ "meta": {
+ "args": [
+ "/bin/sh",
+ "-c",
+ "go build ."
+ ],
+ "env": [
+ "PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+ "GOPATH=/go",
+ "GOFLAGS=-mod=vendor",
+ ],
+ "cwd": "/src",
+ },
+ "mounts": [...]
+ }
+ },
+ "platform": {...},
+ },
+ "inputs": [
+ "step8:0",
+ "step2:0",
+ ]
+ },
+ ...
+ ]
+ },
+```
+
+## `metadata.buildInvocationId` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.buildInvocationId)
+
+Included with `mode=min` and `mode=max`.
+
+Unique identifier for the build invocation. When building a multi-platform image
+with a single build request, this value will be shared by all the platform
+versions of the image.
+
+```json
+ "metadata": {
+ "buildInvocationID": "rpv7a389uzil5lqmrgwhijwjz",
+ ...
+ },
+```
+
+## `metadata.buildStartedOn` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.buildStartedOn)
+
+Included with `mode=min` and `mode=max`.
+
+Timestamp when the build started.
+
+```json
+ "metadata": {
+ "buildStartedOn": "2021-11-17T15:00:00Z",
+ ...
+ },
+```
+
+## `metadata.buildFinishedOn` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.buildFinishedOn)
+
+Included with `mode=min` and `mode=max`.
+
+Timestamp when the build finished.
+
+```json
+ "metadata": {
+ "buildFinishedOn": "2021-11-17T15:01:00Z",
+ ...
+ },
+```
+
+## `metadata.completeness` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.completeness)
+
+Included with `mode=min` and `mode=max`.
+
+Defines if the provenance information is complete.
+
+`completeness.parameters` is true if all the build arguments are included in the
+`invocation.parameters` field. When building with `min` mode, the build
+arguments are not included in the provenance information and parameters are not
+complete. Parameters are also not complete on direct LLB builds that did not use
+a frontend.
+
+`completeness.environment` is always true for BuildKit builds.
+
+`completeness.materials` is true if the `materials` field includes all the
+dependencies of the build. When building from an untracked source in a local
+directory, the materials are incomplete; when building from a remote Git
+repository, all materials can be tracked by BuildKit and `completeness.materials`
+is true.
+
+```json
+ "metadata": {
+ "completeness": {
+ "parameters": true,
+ "environment": true,
+ "materials": true
+ },
+ ...
+ },
+```
+
+## `metadata.reproducible` [(SLSA)](https://slsa.dev/provenance/v0.2#metadata.reproducible)
+
+Defines if the build result is supposed to be byte-by-byte reproducible. This
+value can be set by the user with the `reproducible=true` attestation parameter.
+
+```json
+ "metadata": {
+ "reproducible": false,
+ ...
+ },
+```
+
+## `metadata.https://mobyproject.org/buildkit@v1#hermetic`
+
+Included with `mode=min` and `mode=max`.
+
+This extension field is set to true if the build was hermetic and did not access
+the network. In Dockerfiles, a build is hermetic if it does not use `RUN`
+commands, or if it disables networking with the `--network=none` flag.
+
+```json
+ "metadata": {
+ "https://mobyproject.org/buildkit@v1#hermetic": true,
+ ...
+ },
+```
+
+## `metadata.https://mobyproject.org/buildkit@v1#metadata`
+
+Partially included with `mode=min`.
+
+This extension field defines BuildKit-specific additional metadata that is not
+part of the SLSA provenance spec.
+
+```json
+ "metadata": {
+ "https://mobyproject.org/buildkit@v1#metadata": {
+ "source": {...},
+ "layers": {...},
+ "vcs": {...},
+ },
+ ...
+ },
+```
+
+### `source`
+
+Only included with `mode=max`.
+
+Defines a source mapping of LLB build steps, defined in the
+`buildConfig.llbDefinition` field, to their original source code (for example,
+Dockerfile commands). The `source.locations` field contains the ranges of all
+the Dockerfile commands run in an LLB step. The `source.infos` array contains the
+source code itself. This mapping is present if the BuildKit frontend provided it
+when creating the LLB definition.
+
+### `layers`
+
+Only included with `mode=max`.
+
+Defines the layer mapping of LLB build step mounts defined in
+`buildConfig.llbDefinition` to the OCI descriptors of equivalent layers. This
+mapping is present if the layer data was available, usually when the attestation is
+for an image or if the build step pulled in image data as part of the build.
+
+### `vcs`
+
+Included with `mode=min` and `mode=max`.
+
+Defines optional metadata for the version control system used for the build. If
+a build uses a remote context from Git repository, BuildKit extracts the details
+of the version control system automatically and displays it in the
+`invocation.configSource` field. But if the build uses a source from a local
+directory, the VCS information is lost even if the directory contained a Git
+repository. In this case, the build client can send additional `vcs:source` and
+`vcs:revision` build options and BuildKit will add them to the provenance
+attestations as extra metadata. Note that, contrary to the
+`invocation.configSource` field, BuildKit doesn't verify the `vcs` values, and
+as such they can't be trusted and should only be used as a metadata hint.
+
+## Output
+
+To inspect the provenance that was generated and attached to a container image,
+you can use the `docker buildx imagetools` command to inspect the image in a
+registry. Inspecting the attestation displays the format described in the
+[attestation storage specification](./attestation-storage.md).
+
+For example, inspecting a simple Docker image based on `alpine:latest` results
+in a provenance attestation similar to the following, for a `mode=min` build:
+
+```json
+{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://slsa.dev/provenance/v0.2",
+ "subject": [
+ {
+      "name": "pkg:docker/<registry>/<image>@<tag/digest>?platform=<platform>",
+ "digest": {
+ "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862"
+ }
+ }
+ ],
+ "predicate": {
+ "builder": {
+ "id": ""
+ },
+ "buildType": "https://mobyproject.org/buildkit@v1",
+ "materials": [
+ {
+ "uri": "pkg:docker/alpine@latest?platform=linux%2Famd64",
+ "digest": {
+ "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4"
+ }
+ }
+ ],
+ "invocation": {
+ "configSource": {
+ "entryPoint": "Dockerfile"
+ },
+ "parameters": {
+ "frontend": "dockerfile.v0",
+ "args": {},
+ "locals": [
+ {
+ "name": "context"
+ },
+ {
+ "name": "dockerfile"
+ }
+ ]
+ },
+ "environment": {
+ "platform": "linux/amd64"
+ }
+ },
+ "metadata": {
+ "buildInvocationID": "yirbp1aosi1vqjmi3z6bc75nb",
+ "buildStartedOn": "2022-12-08T11:48:59.466513707Z",
+ "buildFinishedOn": "2022-12-08T11:49:01.256820297Z",
+ "reproducible": false,
+ "completeness": {
+ "parameters": true,
+ "environment": true,
+ "materials": false
+ },
+ "https://mobyproject.org/buildkit@v1#metadata": {}
+ }
+ }
+}
+```
+
+For a similar build, but with `mode=max`:
+
+```json
+{
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://slsa.dev/provenance/v0.2",
+ "subject": [
+ {
+      "name": "pkg:docker/<registry>/<image>@<tag/digest>?platform=<platform>",
+ "digest": {
+ "sha256": "e8275b2b76280af67e26f068e5d585eb905f8dfd2f1918b3229db98133cb4862"
+ }
+ }
+ ],
+ "predicate": {
+ "builder": {
+ "id": ""
+ },
+ "buildType": "https://mobyproject.org/buildkit@v1",
+ "materials": [
+ {
+ "uri": "pkg:docker/alpine@latest?platform=linux%2Famd64",
+ "digest": {
+ "sha256": "8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4"
+ }
+ }
+ ],
+ "invocation": {
+ "configSource": {
+ "entryPoint": "Dockerfile"
+ },
+ "parameters": {
+ "frontend": "dockerfile.v0",
+ "args": {},
+ "locals": [
+ {
+ "name": "context"
+ },
+ {
+ "name": "dockerfile"
+ }
+ ]
+ },
+ "environment": {
+ "platform": "linux/amd64"
+ }
+ },
+ "buildConfig": {
+ "llbDefinition": [
+ {
+ "id": "step0",
+ "op": {
+ "Op": {
+ "source": {
+ "identifier": "docker-image://docker.io/library/alpine:latest@sha256:8914eb54f968791faf6a8638949e480fef81e697984fba772b3976835194c6d4"
+ }
+ },
+ "platform": {
+ "Architecture": "amd64",
+ "OS": "linux"
+ },
+ "constraints": {}
+ }
+ },
+ {
+ "id": "step1",
+ "op": {
+ "Op": null
+ },
+ "inputs": ["step0:0"]
+ }
+ ]
+ },
+ "metadata": {
+ "buildInvocationID": "46ue2x93k3xj5l463dektwldw",
+ "buildStartedOn": "2022-12-08T11:50:54.953375437Z",
+ "buildFinishedOn": "2022-12-08T11:50:55.447841328Z",
+ "reproducible": false,
+ "completeness": {
+ "parameters": true,
+ "environment": true,
+ "materials": false
+ },
+ "https://mobyproject.org/buildkit@v1#metadata": {
+ "source": {
+ "locations": {
+ "step0": {
+ "locations": [
+ {
+ "ranges": [
+ {
+ "start": {
+ "line": 1
+ },
+ "end": {
+ "line": 1
+ }
+ }
+ ]
+ }
+ ]
+ }
+ },
+ "infos": [
+ {
+ "filename": "Dockerfile",
+ "data": "RlJPTSBhbHBpbmU6bGF0ZXN0Cg==",
+ "llbDefinition": [
+ {
+ "id": "step0",
+ "op": {
+ "Op": {
+ "source": {
+ "identifier": "local://dockerfile",
+ "attrs": {
+ "local.differ": "none",
+ "local.followpaths": "[\"Dockerfile\",\"Dockerfile.dockerignore\",\"dockerfile\"]",
+ "local.session": "q2jnwdkas0i0iu4knchd92jaz",
+ "local.sharedkeyhint": "dockerfile"
+ }
+ }
+ },
+ "constraints": {}
+ }
+ },
+ {
+ "id": "step1",
+ "op": {
+ "Op": null
+ },
+ "inputs": ["step0:0"]
+ }
+ ]
+ }
+ ]
+ },
+ "layers": {
+ "step0:0": [
+ [
+ {
+ "mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
+ "digest": "sha256:c158987b05517b6f2c5913f3acef1f2182a32345a304fe357e3ace5fadcad715",
+ "size": 3370706
+ }
+ ]
+ ]
+ }
+ }
+ }
+ }
+}
+```
diff --git a/docs/attestations/slsa-provenance.md b/docs/attestations/slsa-provenance.md
new file mode 100644
index 000000000000..a1437e717027
--- /dev/null
+++ b/docs/attestations/slsa-provenance.md
@@ -0,0 +1,99 @@
+# SLSA provenance
+
+BuildKit supports automatic creation of provenance attestations for the build
+process. Provenance attestations record information describing how a build was
+created, and are important for tracking the security of your software artifacts.
+
+Provenance attestations created by BuildKit include details such as:
+
+- Build parameters and environment.
+- Build timestamps.
+- Version control metadata for your build sources.
+- Build dependencies with their immutable checksums. For example, base images or external URLs used by the build.
+- Descriptions of all build steps, with their source and layer mappings.
+
+Provenance generated by BuildKit is wrapped inside [in-toto attestations](https://github.com/in-toto/attestation)
+in the [SLSA Provenance format](https://slsa.dev/provenance/v0.2).
+
+For more information about how the attestation fields get generated, see [SLSA definitions](./slsa-definitions.md).
+
+## Build with provenance attestations
+
+To build an image with provenance attestations using `buildctl`, use the `attest:provenance` option:
+
+```bash
+buildctl build \
+ --frontend=dockerfile.v0 \
+ --local context=. \
+ --local dockerfile=. \
+ --opt attest:provenance=
+```
+
+You can also customize the attestations using parameters:
+
+```bash
+buildctl build \
+ --frontend=dockerfile.v0 \
+ --local context=. \
+ --local dockerfile=. \
+ --opt attest:provenance=mode=min,inline-only=true
+```
+
+All BuildKit exporters support attaching attestations to build results.
+When the final output format is a container image (`image` or `oci` exporter), provenance is attached
+to the image using the format described in the [attestation storage specification](./attestation-storage.md).
+When creating a multi-platform image, each platform version of the image gets its own provenance.
+
+If you use the `local` or `tar` exporter, the provenance will be written to a file named `provenance.json`
+and exported with your build result, in the root directory.
+
+## Parameters
+
+| Parameter | Type | Default | Description |
+| -------------- | -------------- | ---------------- | ----------------------------------------------------------------------------------------------------------- |
+| `mode` | `min`,`max` | `max` | Configures the amount of provenance to be generated. See [mode](#mode) |
+| `builder-id` | String | | Explicitly set SLSA [`builder.id`](https://slsa.dev/provenance/v0.2#builder.id) field |
+| `filename` | String | `provenance.json` | Set filename for provenance attestation when exported with `local` or `tar` exporter |
+| `reproducible` | `true`,`false` | `false` | Explicitly set SLSA [`metadata.reproducible`](https://slsa.dev/provenance/v0.2#metadata.reproducible) field |
+| `inline-only` | `true`,`false` | `false` | Only embed provenance into exporters that support inline content. See [inline-only](#inline-only) |
+
+### `mode`
+
+Provenance can be generated in one of two modes: `min` or `max`. By default,
+when provenance is enabled, the `mode` parameter will be set to `max`.
+
+In `min` mode, BuildKit generates only the bare minimum amount of provenance,
+including:
+
+- Build timestamps
+- The frontend used
+- The build materials
+
+However, the values of build arguments, the identities of secrets, and rich
+layer metadata will not be included. `mode=min` should be safe to set on all
+builds, as it does not leak information from any part of the build environment.
+
+In `max` mode, BuildKit generates all of the above, as well as:
+
+- The source Dockerfile, and rich layer metadata with sourcemaps to connect the
+ source with the build result
+- The values of passed build arguments
+- Metadata about secrets and ssh mounts
+
+Wherever possible, you should prefer `mode=max` as it contains significantly
+more detailed information for analysis. However, on some builds it may not be
+appropriate, as it includes the values of various build arguments and metadata
+about secrets - these builds should be refactored to prefer passing hidden
+values through secrets wherever possible to prevent unnecessary information
+leakage.
+
+### `inline-only`
+
+By default, provenance is included in all exporters that support
+attestations. The `inline-only` parameter changes this behavior to only
+include the provenance results in exporters that support inline content,
+i.e. only the exporters that produce container images.
+
+Since other exporters write attestations to separate files in their output
+filesystems, you may not want to include the provenance in those cases.
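+
+For example, to generate full provenance but only embed it in image outputs
+(the names below are illustrative):
+
+```bash
+buildctl build \
+  --frontend=dockerfile.v0 \
+  --local context=. \
+  --local dockerfile=. \
+  --opt attest:provenance=mode=max,inline-only=true \
+  --output type=image,name=docker.io/username/image,push=true
+```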
+
diff --git a/docs/build-repro.md b/docs/build-repro.md
index 4c11bd5755a1..9b83ec0593ec 100644
--- a/docs/build-repro.md
+++ b/docs/build-repro.md
@@ -1,129 +1,92 @@
# Build reproducibility
-## Build dependencies
+## Reproducing the pinned dependencies
-Build dependencies are generated when your image has been built. These
-dependencies include versions of used images, git repositories and HTTP URLs
-used by LLB `Source` operation as well as build request attributes.
+Reproducing the pinned dependencies is supported since BuildKit v0.11.
-The structure is base64 encoded and has the following format when decoded:
+e.g.,
+```bash
+buildctl build --frontend dockerfile.v0 --local dockerfile=. --local context=. --source-policy-file policy.json
+```
+An example `policy.json`:
```json
{
- "frontend": "dockerfile.v0",
- "attrs": {
- "build-arg:foo": "bar",
- "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
- "filename": "Dockerfile",
- "platform": "linux/amd64,linux/arm64",
- "source": "crazymax/dockerfile:master"
- },
- "sources": [
- {
- "type": "docker-image",
- "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
- "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
- },
- {
- "type": "docker-image",
- "ref": "docker.io/library/alpine:3.13",
- "pin": "sha256:1d30d1ba3cb90962067e9b29491fbd56997979d54376f23f01448b5c5cd8b462"
- },
+ "rules": [
{
- "type": "git",
- "ref": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
- "pin": "259a5aa5aa5bb3562d12cc631fe399f4788642c1"
+ "action": "CONVERT",
+ "source": {
+ "type": "docker-image",
+ "identifier": "docker.io/library/alpine:latest"
+ },
+ "destination": {
+ "identifier": "docker-image://docker.io/library/alpine:latest@sha256:4edbd2beb5f78b1014028f4fbb99f3237d9561100b6881aabbf5acce2c4f9454"
+ }
},
{
- "type": "http",
- "ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
- "pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
+ "action": "CONVERT",
+ "source": {
+ "type": "http",
+ "identifier": "https://raw.githubusercontent.com/moby/buildkit/v0.10.1/README.md"
+ },
+ "destination": {
+ "attrs": {"http.checksum": "sha256:6e4b94fc270e708e1068be28bd3551dc6917a4fc5a61293d51bb36e6b75c4b53"}
+ }
}
]
}
```
-* `frontend` defines the frontend used to build.
-* `attrs` defines build request attributes.
-* `sources` defines build sources.
- * `type` defines the source type (`docker-image`, `git` or `http`).
- * `ref` is the reference of the source.
- * `pin` is the source digest.
-* `deps` defines build dependencies of input contexts.
+Any source type is supported, but how to pin a source depends on the type.
-### Image config
+## `SOURCE_DATE_EPOCH`
+[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/docs/source-date-epoch/) is the convention for pinning timestamps to a specific value.
-A new field similar to the one for inline cache has been added to the image
-configuration to embed build dependencies:
+The Dockerfile frontend supports consuming the `SOURCE_DATE_EPOCH` value as a special build arg, since BuildKit 0.11.
+Minimal support is also available on older BuildKit versions when using the Dockerfile 1.5 frontend.
-```json
-{
- "moby.buildkit.buildinfo.v0": ""
-}
+```console
+buildctl build --frontend dockerfile.v0 --opt build-arg:SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) ...
```
-By default, the build dependencies are inlined in the image configuration. You
-can disable this behavior with the [`buildinfo` attribute](../README.md#imageregistry).
+The `buildctl` CLI does not automatically propagate the `$SOURCE_DATE_EPOCH` environment value from the client host to the `SOURCE_DATE_EPOCH` build arg.
+However, higher level build tools, such as Docker Buildx (>= 0.10), may automatically capture the environment value.
-### Exporter response (metadata)
+The build arg value is used for:
+- the `created` timestamp in the [OCI Image Config](https://github.com/opencontainers/image-spec/blob/main/config.md#properties)
+- the `created` timestamp in the `history` objects in the [OCI Image Config](https://github.com/opencontainers/image-spec/blob/main/config.md#properties)
+- the `org.opencontainers.image.created` annotation in the [OCI Image Index](https://github.com/opencontainers/image-spec/blob/main/annotations.md#pre-defined-annotation-keys)
+- the timestamp of the files exported with the `local` exporter
+- the timestamp of the files exported with the `tar` exporter
-The solver response (`ExporterResponse`) also contains a new key
-`containerimage.buildinfo` with the same structure as image config encoded in
-base64:
+The build arg value is not used for the timestamps of the files inside the image currently ([Caveats](#caveats)).
-```json
-{
- "ExporterResponse": {
- "containerimage.buildinfo": "",
- "containerimage.digest": "sha256:..."
- }
-}
-```
+See also the [documentation](/frontend/dockerfile/docs/reference.md#buildkit-built-in-build-args) of the Dockerfile frontend.
-If multi-platforms are specified, they will be suffixed with the corresponding
-platform:
+## Caveats
+### Timestamps of the files inside the image
+Currently, the `SOURCE_DATE_EPOCH` value is not used for the timestamps of the files inside the image.
-```json
-{
- "ExporterResponse": {
- "containerimage.buildinfo/linux/amd64": "",
- "containerimage.buildinfo/linux/arm64": "",
- "containerimage.digest": "sha256:..."
- }
-}
+Workaround:
+```dockerfile
+# Limit the timestamp upper bound to SOURCE_DATE_EPOCH.
+# Workaround for https://github.com/moby/buildkit/issues/3180
+ARG SOURCE_DATE_EPOCH
+RUN find $( ls / | grep -E -v "^(dev|mnt|proc|sys)$" ) -newermt "@${SOURCE_DATE_EPOCH}" -writable -xdev | xargs touch --date="@${SOURCE_DATE_EPOCH}" --no-dereference
```
-### Metadata JSON output
+The `touch` command above is [not effective](https://github.com/moby/buildkit/issues/3309) for mount point directories.
+A workaround is to create mount point directories below `/dev` (tmpfs) so that the mount points will not be included in the image layer.
-If you're using the `--metadata-file` flag with [`buildctl`](../README.md#metadata),
-[`buildx build`](https://github.com/docker/buildx/blob/master/docs/reference/buildx_build.md)
-or [`buildx bake`](https://github.com/docker/buildx/blob/master/docs/reference/buildx_bake.md):
+### Timestamps of whiteouts
+Currently, the `SOURCE_DATE_EPOCH` value is not used for the timestamps of "whiteouts" that are created on removing files.
-```shell
-jq '.' metadata.json
-```
-```json
-{
- "containerimage.buildinfo": {
- "frontend": "dockerfile.v0",
- "attrs": {
- "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
- "filename": "Dockerfile",
- "source": "docker/dockerfile:master"
- },
- "sources": [
- {
- "type": "docker-image",
- "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
- "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
- },
- {
- "type": "docker-image",
- "ref": "docker.io/library/alpine:3.13",
- "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
- }
- ]
- },
- "containerimage.digest": "sha256:..."
-}
+Workaround:
+```dockerfile
+# Squash the entire stage for resetting the whiteout timestamps.
+# Workaround for https://github.com/moby/buildkit/issues/3168
+FROM scratch
+COPY --from=0 / /
```
+
+The timestamps of the regular files in the original stage are maintained in the squashed stage, so you do not need to touch the files after this `COPY` instruction.
diff --git a/docs/buildctl.md b/docs/buildctl.md
new file mode 100644
index 000000000000..bd95c27526d5
--- /dev/null
+++ b/docs/buildctl.md
@@ -0,0 +1,220 @@
+# buildctl
+
+`buildctl` is the command-line interface to `buildkitd`.
+
+```
+NAME:
+ buildctl - build utility
+
+USAGE:
+ buildctl [global options] command [command options] [arguments...]
+
+VERSION:
+ 0.0.0+unknown
+
+COMMANDS:
+ du disk usage
+ prune clean up build cache
+ build, b build
+ debug debug utilities
+ help, h Shows a list of commands or help for one command
+
+GLOBAL OPTIONS:
+ --debug enable debug output in logs
+ --addr value buildkitd address (default: "unix:///run/buildkit/buildkitd.sock")
+ --tlsservername value buildkitd server name for certificate validation
+ --tlscacert value CA certificate for validation
+ --tlscert value client certificate
+ --tlskey value client key
+ --tlsdir value directory containing CA certificate, client certificate, and client key
+ --timeout value timeout backend connection after value seconds (default: 5)
+ --help, -h show help
+ --version, -v print the version
+```
+
+## Connecting
+
+`buildctl` connects to a running `buildkitd` instance. The connection address is in the URL format `<protocol>://<address>`.
+The supported `<protocol>` values are any supported by [net.Dialer.DialContext()](https://pkg.go.dev/net#Dialer.DialContext).
+In practice, it will normally be one of:
+
+* Unix-domain socket via `unix://path/to/socket`, e.g. `unix:///run/buildkit/buildkitd.sock` (which is the default)
+* TCP socket via `tcp://<host>:<port>`, e.g. `tcp://10.0.0.1:2555`
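+
+For example, to run `buildctl du` against a remote daemon over TCP with TLS,
+using the global options listed above (addresses and paths are illustrative):
+
+```bash
+buildctl \
+  --addr tcp://10.0.0.1:2555 \
+  --tlscacert /etc/buildkit/ca.pem \
+  --tlscert /etc/buildkit/client-cert.pem \
+  --tlskey /etc/buildkit/client-key.pem \
+  du
+```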
+
+## `build`
+
+Synopsis:
+
+```
+buildctl build --frontend dockerfile.v0 --opt target=foo --opt build-arg:foo=bar --local context=. --local dockerfile=. --output type=image,name=docker.io/username/image,push=true
+```
+
+`buildctl build` uses a buildkit daemon `buildkitd` to drive a build.
+
+The build consists of the following key elements:
+
+* [frontend definition](#frontend): parses the build descriptor, e.g. dockerfile
+* [local sources](#local-sources): sets relevant directories and files passed to the build
+* [frontend options](#frontend-options): options that are relevant to the particular frontend
+* [output](#output): defines what format of output to use and where to place it
+* [cache](#cache): defines where to export the cache generated during the build to, or where to import from
+
+### frontend
+
+The frontend is declared by the flag `--frontend <frontend>`. The `<frontend>` must be one built into `buildkitd`, or an OCI
+image that implements the frontend API.
+
+In the above example, we are using the built-in `dockerfile.v0` frontend, which knows how to parse a dockerfile and convert it to LLB.
+
+There currently are two options for `--frontend`:
+
+* `dockerfile.v0`: uses the dockerfile-to-LLB frontend converter that is built into buildkitd.
+* `gateway.v0`: uses any OCI image that implements the frontend API, with the image provided by `--opt source=<image>`.
+
+### local sources
+
+A build may need access to various sources local to the `buildctl` execution environment,
+such as files and directories or OCI images. These can be provided from the local
+environment to which the user of `buildctl` has access. These are provided as:
+
+* `--local <name>=<dir>` - allow buildkitd to access a local-to-buildctl directory `<dir>` under the unique name `<name>`.
+* `--oci-layout <name>=<dir>` - allow buildkitd to access OCI images in the local-to-buildctl directory `<dir>` under the unique name `<name>`.
+
+Each of the above is expected to provide a unique name, for this invocation of `buildctl`, for a directory. Other parts of `buildctl` can then
+use those "named contexts" to reference directories, files or OCI images.
+
+For example:
+
+```
+buildctl build --local test1=/var/lib/test1
+```
+
+lets `buildkitd` access all of the files in `/var/lib/test1` (relative to wherever `buildctl` is running), referenced via the name `test1`.
+
+Similarly:
+
+```
+buildctl build --oci-layout foo=/var/lib/oci
+```
+
+lets `buildkitd` access OCI images under `/var/lib/oci` (relative to wherever `buildctl` is running), referenced via the name `foo`.
+
+These "named references" are used by the frontend, either directly or with explicit options.
+
+#### dockerfile frontend sources
+
+The dockerfile frontend, enabled via `buildctl build --frontend=dockerfile.v0`, expects to have access to 2 named references:
+
+* `context`: where to perform the build.
+* `dockerfile`: where to find the dockerfile to parse describing the build.
+
+Thus, a dockerfile build invocation would include:
+
+```
+buildctl build --frontend dockerfile.v0 --local context=. --local dockerfile=.
+```
+
+The above means, "build using the dockerfile frontend, passing it the context of the current directory where I am running `buildctl`, and the
+dockerfile in the current directory as well."
+
+### frontend options
+
+Frontend-specific options are defined via `--opt <key>=<value>`. Their meanings depend on the frontend.
+
+#### dockerfile-specific options
+
+In the above example, we define two:
+
+* `--opt target=foo` - build only until the dockerfile target stage `foo`, the equivalent of `docker buildx build --target=foo`.
+* `--opt build-arg:foo=bar` - set the build argument `foo` to `bar`.
+
+In addition, the dockerfile frontend supports additional build contexts, which
+allow you to "alias" an image reference or name with something else entirely.
+
+To use the build contexts, pass `--opt context:<name>=<target>`, where `<name>` is the name in the dockerfile,
+and `<target>` is a properly formatted target. The target can be any of the following:
+
+* `--opt context:alpine=foo1` - replace usage of `alpine` with the named context `foo1`, which should already have been loaded via `--local`.
+* `--opt context:alpine=foo2@sha256:bd04a5b26dec16579cd1d7322e949c5905c4742269663fcbc84dcb2e9f4592fb` - replace usage of `alpine` with the image or index whose sha256 digest is `bd04a5b26dec16579cd1d7322e949c5905c4742269663fcbc84dcb2e9f4592fb`, taken from the OCI layout registered as the named context `foo2`, which should already have been loaded via `--oci-layout`.
+* `--opt context:alpine=docker-image://docker.io/library/ubuntu:latest` - replace usage of `alpine` with the docker image `docker.io/library/ubuntu:latest` from the registry.
+* `--opt context:alpine=https://example.com/foo/bar.git` - replace usage of `alpine` with the contents of the git repository at `https://example.com/foo/bar.git`.
+
+Complete examples of using local and OCI layout:
+
+```sh
+$ buildctl build --frontend dockerfile.v0 --local context=. --local dockerfile=. --local foo1=/home/dir/abc --opt context:alpine=foo1
+$ buildctl build --frontend dockerfile.v0 --local context=. --local dockerfile=. --oci-layout foo2=/home/dir/oci --opt context:alpine=foo2@sha256:bd04a5b26dec16579cd1d7322e949c5905c4742269663fcbc84dcb2e9f4592fb
+```
+
+#### gateway-specific options
+
+The `gateway.v0` frontend passes all of its `--opt` options on to the OCI image that is called to convert the
+input to LLB. The one required option is `--opt source=<image>`, which defines the OCI image used to convert
+the input to LLB.
+
+For example:
+
+```
+buildctl build \
+ --frontend gateway.v0 \
+ --opt source=docker/dockerfile \
+ --local context=. \
+ --local dockerfile=.
+```
+
+This will use the `docker/dockerfile` image to convert the Dockerfile input to LLB.
+
+Other `--opt` options are passed to the frontend.
+
+### output
+
+Output defines what to do with the resulting artifact of the build. It should be a comma-separated series of key=value pairs, the first of
+which must be `type=<type>`, where `<type>` is one of the supported types. The remaining options depend on the type.
+
+In our above example:
+
+```
+--output type=image,name=docker.io/username/image,push=true
+```
+
+* `type=image`: output an OCI image.
+* `name=docker.io/username/image`: the name of the image is `docker.io/username/image`.
+* `push=true`: attempt to push the generated image to the registry using the `name`.
+
+### cache
+
+Cache defines options for buildkit to do one or both of:
+
+* at the end of the build, export additions to cache from the build to external locations
+* at the beginning of the build, import artifacts into the cache from external locations for use during the build
+
+#### export cache
+
+During the build process, `buildkitd` generates cache layers. These can be exported at the end of the build via:
+
+```
+--export-cache type=<type>,<key>=<value>,...
+```
+
+The `<key>=<value>` options are defined for the given type.
+
+For example:
+
+* `--export-cache type=registry,ref=example.com/foo/bar` - export the cache to an OCI image.
+* `--export-cache type=local,dest=path/to/dir` - export the cache to a directory local to where `buildctl` is running.
+
+#### import cache
+
+During the build process, `buildkitd` uses its local cache to optimize its build. In addition, you
+can augment what is in the cache from external locations, i.e. seed the cache.
+
+```
+--import-cache type=<type>,<key>=<value>
+```
+
+The `<key>=<value>` options are defined for the given type, and match those for `--export-cache`.
+
+For example:
+
+* `--import-cache type=registry,ref=example.com/foo/bar` - import into the cache from an OCI image.
+* `--import-cache type=local,src=path/to/dir` - import into the cache from a directory local to where `buildctl` is running.
diff --git a/docs/buildinfo.md b/docs/buildinfo.md
new file mode 100644
index 000000000000..8863a5ea2d08
--- /dev/null
+++ b/docs/buildinfo.md
@@ -0,0 +1,141 @@
+# Build information
+
+> **Warning**
+>
+> Build information is deprecated and will be removed in the next release. See
+> the [Deprecated features page](https://github.com/moby/buildkit/blob/master/docs/deprecated.md)
+> for status and alternative recommendation about this feature.
+
+Build information structures are generated with build metadata that allows you
+to see all the sources (images, git repositories) that were used by the build
+with their exact versions and also the configuration that was passed to the
+build. This information is also embedded into the image configuration if one
+is generated.
+
+## Build dependencies
+
+Build dependencies are generated when your image has been built. These
+dependencies include versions of used images, git repositories and HTTP URLs
+used by LLB `Source` operation as well as build request attributes.
+
+The structure is base64 encoded and has the following format when decoded:
+
+```json
+{
+ "frontend": "dockerfile.v0",
+ "attrs": {
+ "build-arg:foo": "bar",
+ "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
+ "filename": "Dockerfile",
+ "platform": "linux/amd64,linux/arm64",
+ "source": "crazymax/dockerfile:master"
+ },
+ "sources": [
+ {
+ "type": "docker-image",
+ "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
+ "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
+ },
+ {
+ "type": "docker-image",
+ "ref": "docker.io/library/alpine:3.13",
+ "pin": "sha256:1d30d1ba3cb90962067e9b29491fbd56997979d54376f23f01448b5c5cd8b462"
+ },
+ {
+ "type": "git",
+ "ref": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
+ "pin": "259a5aa5aa5bb3562d12cc631fe399f4788642c1"
+ },
+ {
+ "type": "http",
+ "ref": "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md",
+ "pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
+ }
+ ]
+}
+```
+
+* `frontend` defines the frontend used to build.
+* `attrs` defines build request attributes.
+* `sources` defines build sources.
+ * `type` defines the source type (`docker-image`, `git` or `http`).
+ * `ref` is the reference of the source.
+ * `pin` is the source digest.
+* `deps` defines build dependencies of input contexts.
+
+### Image config
+
+A new field similar to the one for inline cache has been added to the image
+configuration to embed build dependencies:
+
+```json
+{
+  "moby.buildkit.buildinfo.v0": "<base64>"
+}
+```
+
+By default, the build dependencies are inlined in the image configuration. You
+can disable this behavior with the [`buildinfo` attribute](../README.md#imageregistry).
+
+### Exporter response (metadata)
+
+The solver response (`ExporterResponse`) also contains a new key
+`containerimage.buildinfo` with the same structure as image config encoded in
+base64:
+
+```json
+{
+ "ExporterResponse": {
+    "containerimage.buildinfo": "<base64>",
+ "containerimage.digest": "sha256:..."
+ }
+}
+```
+
+If multiple platforms are specified, the keys will be suffixed with the
+corresponding platform:
+
+```json
+{
+ "ExporterResponse": {
+    "containerimage.buildinfo/linux/amd64": "<base64>",
+    "containerimage.buildinfo/linux/arm64": "<base64>",
+ "containerimage.digest": "sha256:..."
+ }
+}
+```
+
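+For illustration, a client written in Go could decode this metadata roughly as
+follows. This is a minimal sketch, not part of the buildinfo specification: it
+assumes `resp` is the `*client.SolveResponse` returned by `Solve`, that
+`encoding/base64` and `encoding/json` are imported, and that it runs inside a
+function returning an error:
+
+```go
+raw, err := base64.StdEncoding.DecodeString(resp.ExporterResponse["containerimage.buildinfo"])
+if err != nil {
+  return err
+}
+var buildinfo struct {
+  Frontend string            `json:"frontend"`
+  Attrs    map[string]string `json:"attrs"`
+  Sources  []struct {
+    Type string `json:"type"`
+    Ref  string `json:"ref"`
+    Pin  string `json:"pin"`
+  } `json:"sources"`
+}
+if err := json.Unmarshal(raw, &buildinfo); err != nil {
+  return err
+}
+// buildinfo now mirrors the decoded structure shown above.
+```
+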
+### Metadata JSON output
+
+If you're using the `--metadata-file` flag with [`buildctl`](../README.md#metadata),
+[`buildx build`](https://github.com/docker/buildx/blob/master/docs/reference/buildx_build.md)
+or [`buildx bake`](https://github.com/docker/buildx/blob/master/docs/reference/buildx_bake.md):
+
+```shell
+jq '.' metadata.json
+```
+```json
+{
+ "containerimage.buildinfo": {
+ "frontend": "dockerfile.v0",
+ "attrs": {
+ "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
+ "filename": "Dockerfile",
+ "source": "docker/dockerfile:master"
+ },
+ "sources": [
+ {
+ "type": "docker-image",
+ "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
+ "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
+ },
+ {
+ "type": "docker-image",
+ "ref": "docker.io/library/alpine:3.13",
+ "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
+ }
+ ]
+ },
+ "containerimage.digest": "sha256:..."
+}
+```
diff --git a/docs/buildkitd.toml.md b/docs/buildkitd.toml.md
index d2c60339f199..50b7a670ac24 100644
--- a/docs/buildkitd.toml.md
+++ b/docs/buildkitd.toml.md
@@ -1,12 +1,5 @@
# buildkitd.toml
-## NAME
-
-buildkitd.toml - configuration file for buildkitd
-
-
-## DESCRIPTION
-
The TOML file used to configure the buildkitd daemon settings has a short
list of global settings followed by a series of sections for specific areas
of daemon configuration.
@@ -14,13 +7,11 @@ of daemon configuration.
The file path is `/etc/buildkit/buildkitd.toml` for rootful mode,
`~/.config/buildkit/buildkitd.toml` for rootless mode.
-## EXAMPLE
-
-The following is a complete **buildkitd.toml** configuration example,
-please note some of the configuration is only good for edge cases, please
-take care of it carefully.
+The following is a complete `buildkitd.toml` configuration example. Note that
+some of these settings are only useful for edge cases, so take care when
+applying them.
-```
+```toml
debug = true
# root is where all buildkit state is stored.
root = "/var/lib/buildkit"
@@ -38,6 +29,13 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
key = "/etc/buildkit/tls.key"
ca = "/etc/buildkit/tlsca.crt"
+# config for build history API that stores information about completed build commands
+[history]
+ # maxAge is the maximum age of history entries to keep, in seconds.
+ maxAge = 172800
+ # maxEntries is the maximum number of history entries to keep.
+ maxEntries = 50
+
[worker.oci]
enabled = true
# platforms is manually configure platforms, detected automatically if unset.
@@ -57,6 +55,9 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
apparmor-profile = ""
# limit the number of parallel build steps that can run at the same time
max-parallelism = 4
+ # maintain a pool of reusable CNI network namespaces to amortize the overhead
+ # of allocating and releasing the namespaces
+ cniPoolSize = 16
[worker.oci.labels]
"foo" = "bar"
@@ -77,6 +78,10 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
gc = true
# gckeepstorage sets storage limit for default gc profile, in MB.
gckeepstorage = 9000
+ # maintain a pool of reusable CNI network namespaces to amortize the overhead
+ # of allocating and releasing the namespaces
+ cniPoolSize = 16
+
[worker.containerd.labels]
"foo" = "bar"
@@ -97,7 +102,7 @@ insecure-entitlements = [ "network.host", "security.insecure" ]
[[registry."docker.io".keypair]]
key="/etc/config/key.pem"
cert="/etc/config/cert.pem"
-
+
# optionally mirror configuration can be done by defining it as a registry.
[registry."yourmirror.local:5000"]
http = true
diff --git a/docs/deprecated.md b/docs/deprecated.md
new file mode 100644
index 000000000000..c6c75f0b8ae6
--- /dev/null
+++ b/docs/deprecated.md
@@ -0,0 +1,49 @@
+# Deprecated features
+
+This page provides an overview of features that are deprecated in BuildKit.
+
+As changes are made to BuildKit, there may be times when existing features need
+to be removed or replaced with newer features. Before an existing feature is
+removed, it is labeled as "deprecated" within the documentation and remains in
+BuildKit for at least one stable release unless specified explicitly otherwise.
+After that time it may be removed.
+
+Users are expected to take note of the list of deprecated features each release
+and plan their migration away from those features, and (if applicable) towards
+the replacement features as soon as possible.
+
+The table below provides an overview of the current status of deprecated
+features:
+
+- **Deprecated**: the feature is marked "deprecated" and should no longer be
+ used. The feature may be removed, disabled, or change behavior in a future
+ release. The _"Deprecated"_ column contains the release in which the feature
+ was marked deprecated, whereas the _"Remove"_ column contains a tentative
+ release in which the feature is to be removed. If no release is included in
+ the _"Remove"_ column, the release is yet to be decided on.
+- **Removed**: the feature was removed, disabled, or hidden. Refer to the linked
+ section for details. Some features are "soft" deprecated, which means that
+ they remain functional for backward compatibility, and to allow users to
+ migrate to alternatives. In such cases, a warning may be printed, and users
+ should not rely on this feature.
+
+| Status | Feature | Deprecated | Remove | Recommendation |
+|------------|-----------------------------------------|------------|--------|------------------------------------------------------------------|
+| Deprecated | [Build information](#build-information) | v0.11 | v0.12 | Use [provenance attestations](./attestations/slsa-provenance.md) |
+
+## Build information
+
+[Build information](https://github.com/moby/buildkit/blob/v0.11/docs/buildinfo.md)
+structures have been introduced in [BuildKit v0.10.0](https://github.com/moby/buildkit/releases/tag/v0.10.0)
+and are generated with build metadata that allows you to see all the sources
+(images, git repositories) that were used by the build with their exact
+versions and also the configuration that was passed to the build. This
+information is also embedded into the image configuration if one is generated.
+
+With the introduction of [provenance attestations](./attestations/slsa-provenance.md)
+in [BuildKit v0.11.0](https://github.com/moby/buildkit/releases/tag/v0.11.0),
+the build information feature has been deprecated and will be removed in the
+next release.
+
+To completely disable the build information feature, set the build-arg
+`BUILDKIT_BUILDINFO=false`.
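+
+As a sketch only (the client-side setup here is an assumption, not part of the
+deprecation notice), passing this build-arg through the Go client looks roughly
+like the fragment below, where `ctx` and a connected `*client.Client` named `c`
+are assumed to exist and the local dirs for the dockerfile frontend are
+configured elsewhere:
+
+```go
+_, err := c.Solve(ctx, nil, client.SolveOpt{
+  Frontend: "dockerfile.v0",
+  FrontendAttrs: map[string]string{
+    // Equivalent to passing --opt build-arg:BUILDKIT_BUILDINFO=false to buildctl.
+    "build-arg:BUILDKIT_BUILDINFO": "false",
+  },
+}, nil)
+```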
diff --git a/docs/dev/README.md b/docs/dev/README.md
new file mode 100644
index 000000000000..0c3c358d71ec
--- /dev/null
+++ b/docs/dev/README.md
@@ -0,0 +1,53 @@
+# BuildKit Developer Docs
+
+These are the BuildKit developer docs, designed to be read by technical users
+interested in contributing to or integrating with BuildKit.
+
+> **Warning**
+>
+> While these docs attempt to keep up with the current state of our `master`
+> development branch, the code is constantly changing and updating, as bugs are
+> fixed, and features are added. Remember, the ultimate source of truth is
+> always the code base.
+
+
+## Video
+
+You can find a recording of the "BuildKit architecture and internals" session [here](https://drive.google.com/file/d/1zGMQipL5WJ3sLySu7gHZ_o6tFpxRXRHs/view) ([slides](https://docs.google.com/presentation/d/1tEnuMOENuoVQ3l6viBmguYUn7XpjIHIC-3RHzfyIgjc/edit?usp=sharing)). This session gives an overview of how BuildKit works under the hood and how it was designed. If you’re thinking about contributing to BuildKit, it should give you a good picture of the most important components that make up BuildKit and how they work together.
+
+## Jargon
+
+The following terms are often used throughout the codebase and the developer
+documentation to describe different components and processes in the image build
+process.
+
+| Name | Description |
+| :--- | :---------- |
+| **LLB** | LLB stands for low-level build definition, which is a binary intermediate format used for defining the dependency graph for processes running as part of your build. |
+| **Definition** | Definition is the LLB serialized using protocol buffers. This is the protobuf type that is transported over the gRPC interfaces. |
+| **Frontend** | Frontends are builders of LLB and may issue requests to Buildkit’s gRPC server, such as solving graphs. Currently only `dockerfile.v0` and `gateway.v0` are implemented, but the gateway frontend allows running container images that function as frontends. |
+| **State** | State is a helper object to build LLBs from higher level concepts like images, shell executions, mounts, etc. Frontends use the state API in order to build LLBs and marshal them into the definition. |
+| **Solver** | Solver is an abstract interface to solve a graph of vertices and edges to find the final result. An LLB solver is a solver that understands that vertices are implemented by container-based operations, and that edges map to container-snapshot results. |
+| **Vertex** | Vertex is a node in a build graph. It defines an interface for a content addressable operation and its inputs. |
+| **Op** | Op defines how the solver can evaluate the properties of a vertex operation. An op is retrieved from a vertex and executed in the worker. For example, there are op implementations for image sources, git sources, exec processes, etc. |
+| **Edge** | Edge is a connection point between vertices. An edge references a specific output of a vertex’s operation. Edges are used as inputs to other vertices. |
+| **Result** | Result is an abstract interface return value of a solve. In LLB, the result is a generic interface over a container snapshot. |
+| **Worker** | Worker is a backend that can run OCI images. Currently, Buildkit can run with workers using either runc or containerd. |
+
+## Table of Contents
+
+The developer documentation is split across various files.
+
+For an overview of the process of building images:
+
+- [Request lifecycle](./request-lifecycle.md) - observe how incoming requests
+ are solved to produce a final artifact.
+- [Dockerfile to LLB](./dockerfile-llb.md) - understand how `Dockerfile`
+ instructions are converted to the LLB format.
+- [Solver](./solver.md) - understand how LLB is evaluated by the solver to
+ produce the solve graph.
+
+We also have a number of more specific guides:
+
+- [MergeOp and DiffOp](./merge-diff.md) - learn how MergeOp and DiffOp are
+ implemented, and how to program with them in LLB.
diff --git a/docs/dev/dockerfile-llb.md b/docs/dev/dockerfile-llb.md
new file mode 100644
index 000000000000..1df0c853fc3d
--- /dev/null
+++ b/docs/dev/dockerfile-llb.md
@@ -0,0 +1,212 @@
+# Dockerfile conversion to LLB
+
+If you want to understand how Buildkit translates Dockerfile instructions into
+LLB, or you want to write your own frontend, then seeing how a Dockerfile maps
+onto the Buildkit LLB package will give you a jump start.
+
+The `llb` package from Buildkit provides a chainable state object to help
+construct an LLB. Then you can marshal the state object into a definition using
+protocol buffers, and send it off in a solve request over gRPC.
+
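+As a rough, self-contained sketch of that flow (the daemon address below is the
+conventional rootful socket and is an assumption of this example, and error
+handling is kept minimal):
+
+```golang
+package main
+
+import (
+  "context"
+
+  "github.com/moby/buildkit/client"
+  "github.com/moby/buildkit/client/llb"
+)
+
+func main() {
+  ctx := context.Background()
+
+  // Build up a state with the chainable llb API, roughly what a frontend produces.
+  st := llb.Image("golang:1.12").
+    Run(llb.Shlex("go version")).Root()
+
+  // Marshal the state into a protobuf definition.
+  def, err := st.Marshal(ctx)
+  if err != nil {
+    panic(err)
+  }
+
+  // Send the definition to buildkitd in a solve request over gRPC.
+  c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock")
+  if err != nil {
+    panic(err)
+  }
+  defer c.Close()
+  if _, err := c.Solve(ctx, def, client.SolveOpt{}, nil); err != nil {
+    panic(err)
+  }
+}
+```
+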
+In code, these transformations are performed by the [`Dockerfile2LLB()`](../../frontend/dockerfile/dockerfile2llb/convert.go)
+function, which takes a raw `Dockerfile`'s contents and converts it to an LLB
+state, and associated image config, which are then both assembled in the
+[`Build()`](../../frontend/dockerfile/builder/build.go) function.
+
+## Basic examples
+
+Here are a few Dockerfile instructions you should be familiar with:
+
+- Base image
+
+ ```dockerfile
+ FROM golang:1.12
+ ```
+
+ ```golang
+ st := llb.Image("golang:1.12")
+ ```
+
+- Scratch image
+
+ ```dockerfile
+ FROM scratch
+ ```
+
+ ```golang
+ st := llb.Scratch()
+ ```
+
+- Environment variables
+
+ ```dockerfile
+ ENV DEBIAN_FRONTEND=noninteractive
+ ```
+
+ ```golang
+ st = st.AddEnv("DEBIAN_FRONTEND", "noninteractive")
+ ```
+
+- Running programs
+
+ ```dockerfile
+ RUN echo hello
+ ```
+
+ ```golang
+ st = st.Run(
+ llb.Shlex("echo hello"),
+ ).Root()
+ ```
+
+- Working directory
+
+ ```dockerfile
+ WORKDIR /path
+ ```
+
+ ```golang
+ st = st.Dir("/path")
+ ```
+
+## File operations
+
+This is where LLB starts to deviate from Dockerfile in features. In
+Dockerfiles, the `RUN` command is completely opaque to the builder and simply
+executes a command. But in LLB, there are dedicated file operations that have
+better caching semantics and an understanding of what the operation does:
+
+- Copying files
+
+ ```dockerfile
+ COPY --from=builder /files/* /files
+ ```
+
+ ```golang
+ var CopyOptions = &llb.CopyInfo{
+ FollowSymlinks: true,
+ CopyDirContentsOnly: true,
+ AttemptUnpack: false,
+ CreateDestPath: true,
+ AllowWildcard: true,
+ AllowEmptyWildcard: true,
+ }
+ st = st.File(
+ llb.Copy(builder, "/files/*", "/files", CopyOptions),
+ )
+ ```
+
+- Adding files
+
+ ```dockerfile
+ ADD --from=builder /files.tgz /files
+ ```
+
+ ```golang
+ var AddOptions = &llb.CopyInfo{
+ FollowSymlinks: true,
+ CopyDirContentsOnly: true,
+ AttemptUnpack: true,
+ CreateDestPath: true,
+ AllowWildcard: true,
+ AllowEmptyWildcard: true,
+ }
+ st = st.File(
+    llb.Copy(builder, "/files.tgz", "/files", AddOptions),
+ )
+ ```
+
+- Chaining file commands
+
+ ```dockerfile
+ # not possible without RUN in Dockerfile
+ RUN mkdir -p /some && echo hello > /some/file
+ ```
+
+ ```golang
+ st = st.File(
+ llb.Mkdir("/some", 0755),
+ ).File(
+ llb.Mkfile("/some/file", 0644, "hello"),
+ )
+ ```
+
+## Bind mounts
+
+Bind mounts allow unidirectional syncing of the host's local file system into
+the build environment.
+
+Bind mounts in Buildkit should not be confused with bind mounts in the Linux
+kernel - they do not sync bidirectionally. Bind mounts are only a snapshot of
+your local state, which is specified through the `llb.Local` state object:
+
+- Using bind mounts
+
+ ```dockerfile
+ WORKDIR /builder
+ RUN --mount=type=bind,target=/builder \
+ PIP_INDEX_URL=https://my-proxy.com/pypi \
+ pip install .
+ ```
+
+ ```golang
+ localState := llb.Local(
+ "context",
+ llb.SessionID(client.BuildOpts().SessionID),
+    llb.WithCustomName("loading ."),
+ llb.FollowPaths([]string{"."}),
+ )
+
+  execState := st.Dir("/builder").Run(
+ llb.Shlex("pip install ."),
+ llb.AddEnv(
+ "PIP_INDEX_URL",
+ "https://my-proxy.com/pypi",
+ ),
+ )
+  _ = execState.AddMount("/builder", localState)
+ // the return value of AddMount captures the resulting state of the mount
+ // after the exec operation has completed
+
+ st := execState.Root()
+ ```
+
+## Cache mounts
+
+Cache mounts allow for a shared file cache location between build invocations,
+which allow manually caching expensive operations, such as package downloads.
+Mounts have options to persist between builds with different sharing modes.
+
+- Using cache mounts
+
+ ```dockerfile
+ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt \
+ apt-get update
+ ```
+
+ ```golang
+ var VarCacheAptMount = llb.AddMount(
+ "/var/cache/apt",
+ llb.Scratch(),
+ llb.AsPersistentCacheDir(
+ "some-cache-id",
+ llb.CacheMountLocked,
+ ),
+ )
+
+ var VarLibAptMount = llb.AddMount(
+ "/var/lib/apt",
+ llb.Scratch(),
+ llb.AsPersistentCacheDir(
+ "another-cache-id",
+ llb.CacheMountShared,
+ ),
+ )
+
+  st = st.Run(
+ llb.Shlex("apt-get update"),
+ VarCacheAptMount,
+ VarLibAptMount,
+ ).Root()
+ ```
diff --git a/docs/dev/merge-diff.md b/docs/dev/merge-diff.md
new file mode 100644
index 000000000000..ba0bf19849d1
--- /dev/null
+++ b/docs/dev/merge-diff.md
@@ -0,0 +1,684 @@
+# Merge and Diff Ops
+
+MergeOp and DiffOp are two interrelated LLB operations that enable the rebasing
+of LLB results onto other results and the separation of LLB results from their
+base, respectively. Under the hood, these ops enable fine-grained
+manipulation of container layer chains, which can result in highly efficient
+operations for many use cases.
+
+This doc assumes some familiarity with LLB and ops like ExecOp and FileOp. More
+background on LLB can be obtained from the README.md in Buildkit's git
+repository. This doc also uses the Go LLB client for examples, though MergeOp
+and DiffOp are not in any way language specific.
+
+## MergeOp
+
+MergeOp has a very simple interface:
+
+```go
+func Merge(inputs []llb.State) llb.State
+```
+
+The intuition is that it merges the contents of the provided states together
+into one state (hence the name), with files from later states taking precedence
+over those from earlier ones.
+
+To be more concrete, MergeOp returns a state where each of the input states are
+rebased on top of each other in the order provided. "Rebasing" a state `B` onto
+another state `A` creates a state that:
+
+- Has all the contents of `B`
+- Has all the contents of `A` except when a path exists in both `B` and `A`. In this case:
+ - If both paths are directories, their contents are merged. Metadata (such
+ as permissions) on the directory from `B` take precedence.
+ - If one of the paths is not a directory, whatever is present in `B` takes
+ precedence. This also means that if a file in `B` overwrites a dir in `A`,
+    then all files/dirs in the tree under that path in `A` are also
+ removed.
+
+MergeOp is associative, i.e. using shorthand notation: `Merge(A, B, C) ==
+Merge(Merge(A, B), C) == Merge(A, Merge(B, C))`. Buildkit knows this and
+internally optimizes LLB merges that are equivalent in this way to re-use the
+same cache entries.
+
+There are more subtleties to the behavior of MergeOp, such as when deletions
+are present in a layer making up a state, discussed in the "Advanced Details"
+section of this doc.
+
+States created by MergeOp are the same as any other LLB states in that they can
+be used as the base for exec, be mounted to arbitrary paths in execs, be
+plugged into other merges and diffs, be exported, etc.
+
+As a very simple example:
+
+```go
+// a has /dir/a
+a := llb.Scratch().
+ File(llb.Mkdir("/dir", 0755)).
+ File(llb.Mkfile("/dir/a", 0644, []byte("a")))
+
+// b has /dir/b and /otherdir
+b := llb.Scratch().
+ File(llb.Mkdir("/dir", 0755)).
+ File(llb.Mkfile("/dir/b", 0644, []byte("b"))).
+ File(llb.Mkdir("/otherdir", 0755))
+
+// c has /dir/a and /dir/c
+c := llb.Scratch().
+ File(llb.Mkdir("/dir", 0700)).
+ File(llb.Mkfile("/dir/a", 0644, []byte("overwritten"))).
+ File(llb.Mkfile("/dir/c", 0644, []byte("c")))
+
+// merged will consist of /dir/a, /dir/b, /dir/c and /otherdir.
+// The file at /dir/a will have contents set to "overwritten" because c is merged after a.
+// /dir will have permissions set to 0700 for the same reason.
+merged := llb.Merge([]llb.State{a, b, c})
+
+// merged can be used as the base for new states
+mergedPlusMore := merged.File(llb.Mkdir("/yetanotherdir", 0755))
+// or as the input to other merges
+mergedPlusMore = llb.Merge([]llb.State{merged, llb.Scratch().File(llb.Mkdir("/yetanotherdir", 0755))})
+```
+
+### MergeOp Container Image Export
+
+When the result of a MergeOp is exported as a container image, the image will
+consist of the layers making up each input joined together in the order of the
+MergeOp. If Buildkit has cached any one of these layers already, they will not
+need to be re-exported (i.e. re-packaged into compressed tarballs).
+Additionally, if the image is being pushed to a registry and the registry
+indicates it already has any of the layers, then Buildkit can skip pushing
+those layers entirely.
+
+Layers joined together by MergeOp do not have dependencies on each other, so a
+cache invalidation of the layers of one input doesn't cascade to the layers of
+the other inputs.
+
+## DiffOp
+
+DiffOp also has a very simple interface:
+
+```go
+func Diff(lower llb.State, upper llb.State) llb.State
+```
+
+The intuition is that it returns a state whose contents are the difference
+between `lower` and `upper`. It can be viewed as something like the inverse of
+MergeOp; whereas MergeOp "adds" states together, DiffOp "subtracts" `lower`
+from `upper` (in a manner of speaking).
+
+More specifically, DiffOp returns a state that has the contents present in
+`upper` that either aren't present in `lower` or have changed from `lower` to
+`upper`. Another way of thinking about it is that if you start at `A` and apply
+`Diff(A, B)`, you will end up at `B`. Or, even more succinctly, `Merge(A,
+Diff(A, B)) == B`.
+
+Files and dirs are considered to have changed between `lower` and `upper` if
+their contents are unequal or if metadata like permissions and `mtime` have
+changed. Unequal `atime` or `ctime` values are not considered to be a change.
+
+There are more subtleties to the behavior of DiffOp discussed in the "Advanced
+Details" section of this doc.
+
+States created by DiffOp are the same as any other LLB states in that they can
+be used as the base for exec, be mounted to arbitrary paths in execs, be
+plugged into merges and other diffs, be exported, etc.
+
+As a very simple example:
+
+```go
+base := llb.Image("alpine")
+basePlusBuilt := base.Run(llb.Shlex("touch /foo")).Root()
+// diffed consists of just the file /foo, nothing in the alpine image is present
+diffed := llb.Diff(base, basePlusBuilt)
+```
+
+### DiffOp Container Image Export
+
+When the result of a DiffOp is exported as a container image, layers will be
+re-used as much as possible. To explain, consider this case:
+
+```go
+lower := llb.Image("alpine")
+middle := lower.Run(llb.Shlex("touch /foo")).Root()
+upper := middle.Run(llb.Shlex("touch /bar")).Root()
+diff := llb.Diff(lower, upper)
+```
+
+In this case, there is a "known chain" from `lower` to `upper` because `lower`
+is a state in `upper`'s history. This means that when the DiffOp is exported as
+a container image, it can just consist of the container layers for `middle`
+joined with the container layers for `upper`.
+
+Another way of thinking about this is that when `lower` is a state in `upper`'s
+history, the diff between the two is equivalent to a merge of the states
+between them. So, using the example above:
+
+```go
+llb.Diff(lower, upper) == llb.Merge([]llb.State{
+ llb.Diff(lower, middle),
+ llb.Diff(middle, upper),
+})
+```
+
+This behavior extends to arbitrary numbers of states separating `lower` and `upper`.
+
+In the case where there is not a chain between `lower` and `upper` that
+Buildkit can determine, DiffOp still works consistently but, when exported,
+will always result in a single layer that is not re-used from its inputs.
+
+## Example Use Case: Better "Copy Chains" with MergeOp
+
+### The Problem
+
+A common pattern when building container images is to independently assemble
+components of the image and then combine those components together into a final
+image using a chain of Copy FileOps. For example, when using the Dockerfile
+frontend, this is the multi-stage build pattern and a chain of `COPY
+--from=...` statements.
+
+One issue with this type of pattern is that if any of the inputs to the copy
+chain change, that doesn't just invalidate Buildkit's cache for that input, it
+also invalidates Buildkit's cache for any copied layers after that one.
+
+To be a bit more concrete, consider the following LLB as specified with the Go client:
+
+```go
+// stage a
+a := llb.Image("alpine").Run(llb.Shlex("build a")).Root()
+// stage b
+b := llb.Image("alpine").Run(llb.Shlex("build b")).Root()
+// stage c
+c := llb.Image("alpine").Run(llb.Shlex("build c")).Root()
+
+// final combined stage
+combined := llb.Image("busybox").
+ File(llb.Copy(a, "/bin/a", "/usr/local/bin/a")).
+ File(llb.Copy(b, "/bin/b", "/usr/local/bin/b")).
+ File(llb.Copy(c, "/bin/c", "/usr/local/bin/c"))
+```
+
+Note that this is basically the equivalent of the following Dockerfile:
+
+```dockerfile
+FROM alpine as a
+RUN build a
+
+FROM alpine as b
+RUN build b
+
+FROM alpine as c
+RUN build c
+
+FROM busybox as combined
+COPY --from=a /bin/a /usr/local/bin/a
+COPY --from=b /bin/b /usr/local/bin/b
+COPY --from=c /bin/c /usr/local/bin/c
+```
+
+Now, say you do a build of this LLB and export the `combined` stage as a
+container image to a registry. If you were to then repeat the same build with
+the same instance of Buildkit, each part of the build should be cached,
+resulting in no work needing to be done and no layers needing to be exported or
+pushed to the registry.
+
+Then, say you later do the build again but this time with a change to `a`. The
+build for `a` is thus not cached, which means that the copy of `/bin/a` into
+`/usr/local/bin/a` of `combined` is also not cached and has to be re-run. The
+problem is that because each copy in to `combined` is chained together, the
+invalidation of the copy from `a` also cascades to its descendants, namely the
+copies from `b` and `c`. This is despite the fact that `b` and `c` are
+independent of `a` and thus don't need to be invalidated. In graphical form:
+
+```mermaid
+graph TD
+ alpine("alpine") --> |CACHE HIT fa:fa-check| A("build a2.0")
+ alpine -->|CACHE HIT fa:fa-check| B("build b")
+ alpine -->|CACHE HIT fa:fa-check| C("build c")
+
+ A --> |CACHE MISS fa:fa-ban| ACopy(/usr/local/bin/a)
+ busybox("busybox") -->|CACHE HIT fa:fa-check| ACopy
+ B -->|CACHE HIT fa:fa-check| BCopy(/usr/local/bin/b)
+ ACopy -->|CACHE MISS fa:fa-ban| BCopy
+ C -->|CACHE HIT fa:fa-check| CCopy(/usr/local/bin/c)
+ BCopy -->|CACHE MISS fa:fa-ban| CCopy
+
+ classDef green fill:#5aa43a,stroke:#333,stroke-width:2px;
+ class alpine,B,C,busybox green
+ classDef red fill:#c72020,stroke:#333,stroke-width:2px;
+ class A,ACopy,BCopy,CCopy red
+```
+
+As a result, not only do the copies from `b` and `c` to create
+`/usr/local/bin/b` and `/usr/local/bin/c` need to run again, they also result
+in new layers needing to be exported and then pushed to a registry. For many
+use cases, this becomes a significant source of overhead in terms of build
+times and the amount of data that needs to be stored and transferred.
+
+### The Solution
+
+MergeOp can be used to fix the problem of cascading invalidation in copy chains:
+
+```go
+a := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build a")).Root(), "/bin/a", "/usr/local/bin/a"))
+b := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build b")).Root(), "/bin/b", "/usr/local/bin/b"))
+c := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build c")).Root(), "/bin/c", "/usr/local/bin/c"))
+combined := llb.Merge([]llb.State{
+ llb.Image("busybox"),
+ a,
+ b,
+ c,
+})
+```
+
+(*Note that newer versions of the Dockerfile frontend support a `--link` flag
+for `COPY`, which results in basically this same pattern.*)
+
+Two changes have been made from the previous version:
+
+1. `a`, `b`, and `c` have been updated to copy their desired contents to
+ `Scratch` (a new, empty state).
+1. `combined` is defined as a MergeOp of the states desired in the final image.
+
+Say you're doing this build for the first time. The build will first create
+states `a`, `b`, and `c`, resulting in each being a single layer consisting
+only of contents `/usr/local/bin/a`, `/usr/local/bin/b`, and `/usr/local/bin/c`
+respectively. Then, the MergeOp rebases each of those states on to the base
+`busybox` image. As discussed earlier, the container image export of a MergeOp
+will consist of the layers of the merge inputs joined together, so the final
+image looks mostly the same as before.
+
+The benefits of MergeOp become apparent when considering what happens if the
+build of `a` is modified. Whereas before this led to invalidation of the copy
+of `b` and `c`, now those merge inputs are completely unaffected; no new cache
+entries or new container layers need to be created for them. So, the end result
+is that the only work Buildkit does when `a` changes is re-build `a` and then
+push the new layers for `/usr/local/bin/a` (plus a new image manifest).
+`/usr/local/bin/b` and `/usr/local/bin/c` do not need to be re-exported and do
+not need to be re-pushed to the registry. In graphical form:
+
+```mermaid
+graph TD
+ alpine("alpine") --> |CACHE HIT fa:fa-check| A("build a2.0")
+ alpine -->|CACHE HIT fa:fa-check| B("build b")
+ alpine -->|CACHE HIT fa:fa-check| C("build c")
+
+ busybox("busybox") -->|CACHE HIT fa:fa-check| Merge("Merge (lazy)")
+ A --> |CACHE MISS fa:fa-ban| ACopy(/usr/local/bin/a)
+ ACopy -->|CACHE MISS fa:fa-ban| Merge
+ B -->|CACHE HIT fa:fa-check| BCopy(/usr/local/bin/b)
+ BCopy -->|CACHE HIT fa:fa-check| Merge
+ C -->|CACHE HIT fa:fa-check| CCopy(/usr/local/bin/c)
+ CCopy -->|CACHE HIT fa:fa-check| Merge
+
+ classDef green fill:#5aa43a,stroke:#333,stroke-width:2px;
+ class alpine,B,BCopy,C,CCopy,busybox green
+ classDef red fill:#c72020,stroke:#333,stroke-width:2px;
+ class A,ACopy red
+```
+
+An important aspect of this behavior is that MergeOp is implemented lazily,
+which means that its on-disk filesystem representation is only created locally
+when strictly required. This means that even though a change to `a` invalidates
+the MergeOp as a whole, no work needs to be done to create the merged state
+on-disk when it's only being exported as a container image. This laziness
+behavior is discussed more in the "Performance Considerations" section of the
+doc.
+
+You can see a working-code example of this by comparing `examples/buildkit3`
+with `examples/buildkit4` in the Buildkit git repo.
+
+## Example Use Case: Remote-only Image Append with MergeOp
+
+If you have some layers already pushed to a remote registry, MergeOp allows you
+to create new images that combine those layers in arbitrary ways without having
+to actually pull any layers down first. For example:
+
+```go
+foo := llb.Image("fooApp:v0.1")
+bar := llb.Image("barApp:v0.3")
+qaz := llb.Image("qazApp:v1.2")
+merged := llb.Merge([]llb.State{foo, bar, qaz})
+```
+
+If `merged` is being exported to the same registry that already has the layers
+for `fooApp`, `barApp` and `qazApp`, then the only thing Buildkit does during
+the export is create an image manifest (just some metadata) and push it to the
+registry. No layers need to be pushed (they are already there) and they don't
+even need to be pulled locally to Buildkit either.
+
+Note that if you were to instead do this:
+
+```go
+merged := llb.Merge([]llb.State{foo, bar, qaz}).Run(llb.Shlex("extra command")).Root()
+```
+
+Then `fooApp`, `barApp` and `qazApp` will need to be pulled, though they will
+usually be merged together more efficiently than the naive solution of just
+unpacking the layers on top of each other. See the "Performance Details"
+section for more info.
+
+Additionally, if you export your Buildkit cache to a registry, this same idea
+can be extended to any LLB types, not just `llb.Image`. So, using the same
+example as the previous use case:
+
+```go
+a := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build a")).Root(), "/bin/a", "/usr/bin/a"))
+b := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build b")).Root(), "/bin/b", "/usr/bin/b"))
+c := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run(llb.Shlex("build c")).Root(), "/bin/c", "/usr/bin/c"))
+combined := llb.Merge([]llb.State{
+ llb.Image("alpine"),
+ a,
+ b,
+ c,
+})
+```
+
+If you do a build that includes a remote cache export to a registry, then any
+Buildkit worker importing that cache can run builds that do different merges of
+those layers without having to pull anything down. For instance, if a separate
+Buildkit worker imported that remote cache and then built this:
+
+```go
+combined2 := llb.Merge([]llb.State{
+ c,
+ a
+})
+```
+
+An export of `combined2` would not need to pull any layers down because it's
+just a merge of `c` and `a`, which already have layers in the registry thanks
+to the remote cache. This works because a remote cache import is actually just
+a metadata download; layers are only pulled locally once needed and they aren't
+needed for this MergeOp.
+
+## Example Use Case: Modeling Package Builds with MergeOp+DiffOp
+
+Merge and Diff have many potential use cases, but one primary one is to assist
+higher level tooling that's using LLB to model "dependency-based builds", such
+as what's found in many package managers and other build systems.
+
+More specifically, the following is a common pattern used to model the build of a "package" (or equivalent concept) in such systems:
+
+1. The build-time dependencies of the package are combined into a filesystem.
+ The dependencies are themselves just already-built packages.
+1. A build is run by executing some commands that have access to the combined
+ dependencies, producing new build artifacts that are somehow isolated from
+ the dependencies. These isolated build artifacts become the new package's
+ contents.
+1. The new package can then be used as a dependency of other packages and/or
+ served directly to end users, while being careful to ensure that any runtime
+ dependencies are also present when the package needs to be utilized.
+
+One way to adapt the above model to LLB might be like this:
+
+```go
+// "Packages" are just LLB states. Build-time dependencies are combined
+// together into a filesystem using MergeOp.
+runtimeDeps := llb.Merge([]llb.State{depC, depD})
+buildDeps := llb.Merge([]llb.State{src, depA, depB, runtimeDeps})
+
+// Builds of a new package are ExecOps on top of the MergeOp from the previous step
+// (one ExecOp for the build and one for the install). The install ExecOp is defined
+// such that build artifacts are written to a dedicated Mount, isolating them from
+// the dependencies under /output.
+builtPackage := buildDeps.Run(
+ llb.Dir("/src"),
+ llb.Shlex("make"),
+).Root().Run(
+ llb.Dir("/src"),
+ llb.Shlex("make install"),
+ llb.AddEnv("DESTDIR", "/output"),
+ llb.AddMount("/output", llb.Scratch()),
+).GetMount("/output")
+
+// If the package needs to be run as part of a different build or by an
+// end user, the runtime deps of the state can be included via a MergeOp.
+llb.Merge([]llb.State{runtimeDeps, builtPackage})
+```
+
+While the above is a bit of an over-simplification (it, for instance, ignores
+the need to topologically sort dependency DAGs before merging them together),
+the important point is that it only needs MergeOp and ExecOp; DiffOp is left
+out entirely. For many use cases, this is completely fine and DiffOp is not
+needed.
+
+Some use cases can run into issues though, specifically with the part where
+build artifacts need to be isolated from their dependencies. The above example
+uses the convention of setting `DESTDIR`, an environment variable that
+specifies a directory that `make install` should place artifacts under. Most
+build systems support either `DESTDIR` or some type of equivalent mechanism for
+isolating installed build artifacts. However, there are times when this
+convention is either not available or not desired, in which case DiffOp can
+come to the rescue as a generic, tool-agnostic way of separating states out
+from their original dependency base. The modification from the previous example
+is quite small:
+
+```go
+// Same `make` command as before
+buildBase := buildDeps.Run(
+ llb.Dir("/src"),
+ llb.Shlex("make"),
+).Root()
+
+// Now, `make install` doesn't use DESTDIR and just installs directly
+// to the rootfs of the build. The package contents are instead isolated
+// by diffing the rootfs from before and after the install command.
+builtPackage := llb.Diff(buildBase, buildBase.Run(
+ llb.Dir("/src"),
+ llb.Shlex("make install"),
+).Root())
+```
+
+This approach using DiffOp should achieve the same end result as the previous
+version but without having to rely on `DESTDIR` support being present in the
+`make install` step.
+
+The fact that DiffOp is more generic and arguably simpler than setting
+`DESTDIR` or equivalents doesn't mean it's strictly better for every case. The
+following should be kept in mind when dealing with use cases where both
+approaches are viable:
+
+1. The version that uses `DESTDIR` will likely have *slightly* better
+ performance than the version using DiffOp for many use cases. This is because
+ it's faster for Buildkit to merge in a state that is just a single layer on top
+ of scratch (i.e. the first version of `builtPackage` that used `DESTDIR`) than
+ it is to merge in a state whose diff is between two non-empty states (i.e. the
+ DiffOp version). Whether the performance difference actually matters needs to
+ be evaluated on a case-by-case basis.
+1. DiffOp has some subtle behavior discussed in the "Advanced Details" section
+ that, while irrelevant to most use cases, can occasionally distinguish it from
+ the `DESTDIR` approach.
+
+## Performance Considerations
+
+### Laziness
+
+MergeOp and DiffOp are both implemented lazily in that their on-disk filesystem
+representations will only be created when absolutely necessary.
+
+The most common situation in which a Merge/Diff result will need to be
+"unlazied" (created on disk) is when it is used as the input to an Exec or File
+op. For example:
+
+```go
+rootfs := llb.Merge([]llb.State{A, B})
+extraLayer := rootfs.Run(llb.Shlex("some command")).Root()
+```
+
+In this case, if `extraLayer` is not already cached, `extraLayer` will need
+`rootfs` to exist on disk in order to run, so `rootfs` will have to be
+unlazied. The same idea applies if `extraLayer` was defined as a FileOp or if
+`rootfs` was defined using a `DiffOp`.
+
+What's perhaps more interesting are cases in which merge/diff results *don't*
+need to be unlazied. One such situation is when they are exported as a
+container image. As discussed previously, layers from the inputs of merge/diff
+are re-used as much as possible during image exports, so that means that the
+final merged/diffed result is not needed, only the inputs.
+
+Another situation that doesn't require unlazying is when a merge/diff is used
+as an input to another merge/diff. For example:
+
+```go
+diff1 := llb.Diff(A, B)
+diff2 := llb.Diff(C, D)
+merge := llb.Merge([]llb.State{diff1, diff2})
+```
+
+In this case, even though `diff1` and `diff2` are used as an input to `merge`, they do not need to be unlazied because `merge` is also lazy. If `A`, `B`, `C` or `D` are lazy LLB states, they also do not need to be unlazied. Laziness is transitive in this respect.
+
+### Snapshotter-dependent Optimizations
+
+There are some optimizations in the implementation of Merge and Diff op that
+are relevant to users concerned with scaling large builds involving many
+different merges and/or diffs. These optimizations are ultimately
+implementation details though and don't have any impact on the actual contents
+of merge/diff results.
+
+When a merge or diff result needs to be unlazied, the "universal" fallback
+implementation that works for all snapshotter backends is to create them by
+copying files from the inputs as needed into a new filesystem. This works but
+it can become costly in terms of disk space and CPU time at a certain scale.
+
+However, for two of the default snapshotters (overlay and native), there is an
+optimization in place to avoid copying files and instead hardlink them from the
+inputs into the merged/diffed filesystem. This is at least as fast as copying
+the files and often significantly faster for inputs with large file sizes.
+
+## Advanced Details
+
+These details are not expected to impact many use cases, but are worth
+reviewing if you are experiencing surprising behavior while using Merge and
+Diff op or otherwise want to understand them at a deeper level.
+
+### Layer-like Behavior of Merge and Diff
+
+One important principle of LLB results is that when they are exported as
+container images, an external runtime besides Buildkit that pulls and unpacks
+the image must see the same filesystem that is seen during build time.
+
+That may seem a bit obvious, but it has important implications for Merge and
+Diff, which are ops that are designed to re-use container layers from their
+inputs as much as possible in order to maximize cache re-use and efficiency.
+Many of the more surprising aspects of the behavior discussed in the rest of
+this doc are a result of needing to ensure that Merge+Diff results look the
+same before and after export as container layers.
+
+### Deletions
+
+When either 1) an LLB state deletes a file present in its parent chain or 2)
+`upper` lacks a path that is present in `lower` while using DiffOp, that
+deletion is considered an "entity" in the same way that a directory or file is
+and can have an effect when using that state as a merge input. For example:
+
+```go
+// create a state that only has /foo
+foo := llb.Scratch().File(llb.Mkfile("/foo", 0644, nil))
+
+// create a state where the file /foo has been removed, leaving nothing
+rmFoo := foo.File(llb.Rm("/foo"))
+
+// create a state containing the file /bar on top of the previous "empty" state
+bar := rmFoo.File(llb.Mkfile("/bar", 0644, nil))
+
+merged := llb.Merge([]llb.State{foo, bar})
+```
+
+You might assume that `merged` would consist of the files `/foo` and `/bar`,
+but it will actually just consist of `/bar`. This is because the state `bar`
+also includes a deletion of the file `/foo` in its chain and thus a part of its
+definition.
+
+One way of understanding this is that when you merge `foo` and `bar`, you are
+actually merging the diffs making up each state in the chain that created `foo`
+and `bar`, i.e.:
+
+```go
+llb.Merge([]llb.State{foo, bar}) == llb.Merge([]llb.State{
+ // foo's chain (only 1 layer)
+ llb.Diff(llb.Scratch(), foo), // create /foo
+ // bar's chain (3 layers)
+ llb.Diff(llb.Scratch(), foo), // create /foo
+ llb.Diff(foo, rmFoo), // delete /foo
+ llb.Diff(rmFoo, bar), // create /bar
+})
+```
+
+As you can see, `Diff(foo, rmFoo)` is included there and its only "content" is
+a deletion of `/foo`. Therefore, when `merged` is being constructed, it will
+apply that deletion and `/foo` will not exist in the final `merged` result.
+
+Also note that if the order of the merge was reversed to be `Merge([]State{bar,
+foo})`, then `/foo` will actually exist in `merged` alongside `/bar` because
+then the contents of `foo` take precedence over the contents of `bar`, and the
+creation of `/foo` therefore "overwrites" the previous deletion of it.
+
+One final detail to note is that even though deletions are entities in the same
+way files/dirs are, they do not show up when mounted. For example, if you were
+to mount `llb.Diff(foo, rmFoo)` during a build, you would just see an empty
+directory. Deletions only have an impact when used as an input to MergeOp.
+
+#### Workarounds
+
+For use cases that are experiencing this behavior and do not want it, the best
+option is to find a way to avoid including the problematic deletion in your
+build definition. This can be very use-case specific, but using the previous
+example one option might be this:
+
+```go
+justBar := llb.Diff(rmFoo, bar)
+merged := llb.Merge([]llb.State{foo, justBar})
+```
+
+Now, `merged` consists of both `/foo` and `/bar` because `justBar` has "diffed
+out" its parent `rmFoo` and consists only of the final layer that creates
+`/bar`. Other use cases may require different approaches like changing build
+commands to avoid unneeded deletions of files and directories.
+
+For use cases that can't avoid the deletion for whatever reason, the fallback
+option is to use a Copy op to squash the merge input and discard any deletions.
+So, building off the previous example:
+
+```go
+squashedBar := llb.Scratch().File(llb.Copy(bar, "/", "/"))
+merged := llb.Merge([]llb.State{foo, squashedBar})
+```
+
+This results in `merged` consisting of both `/foo` and `/bar`. This is because
+`squashedBar` is a single layer that only consists of the file+directories that
+existed in `bar`, not any of its deletions.
+
+Note that there are currently performance tradeoffs to this copy approach: it
+will actually result in a copy on disk (i.e. no hardlink optimizations), the
+copy will not be lazy, and `squashedBar` will be a distinct layer from its
+inputs as far as the Buildkit cache and any remote registries are concerned,
+which may or may not matter depending on the use case.
+
+### Diff Corner Cases
+
+There are some cases where it's ambiguous what the right behavior should be
+when merging diffs together. As stated before, Merge+Diff resolve these
+ambiguities by following the same behavior as container image import/export
+implementations in order to maintain consistency.
+
+One example:
+
+```go
+dir := llb.Scratch().File(llb.Mkdir("/dir", 0755))
+dirFoo := dir.File(llb.Mkfile("/dir/foo", 0755, nil))
+// rmFoo consists of a delete of /dir/foo
+rmFoo := llb.Diff(dirFoo, dirFoo.File(llb.Rm("/dir/foo")))
+
+// otherdir just consists of /otherdir
+otherdir := llb.Scratch().File(llb.Mkdir("/otherdir", 0755))
+
+// merged consists of /otherdir and /dir (no /dir/foo though)
+merged := llb.Merge([]llb.State{otherdir, rmFoo})
+```
+
+In this case, you start with just `/otherdir` and apply `rmFoo`, which is a
+deletion of `/dir/foo`. But `/dir/foo` doesn't exist, so it may be reasonable
+to expect that it just has no effect. However, image import/export code will
+actually create `/dir` even though it only exists in order to hold an
+inapplicable delete. As a result, Merge+Diff also have this same behavior.
diff --git a/docs/dev/request-lifecycle.md b/docs/dev/request-lifecycle.md
new file mode 100644
index 000000000000..92ded05c874e
--- /dev/null
+++ b/docs/dev/request-lifecycle.md
@@ -0,0 +1,246 @@
+# Solve Request Lifecycle
+
+Buildkit solves build graphs to find the final result. By default, nothing will
+be exported to the client, but requests can be made after solving the graph to
+export results to external destinations (like the client’s filesystem).
+
+A solve request goes through the following:
+
+1. The client makes a solve request and sends it to buildkitd over gRPC. The
+   request may include either an LLB definition or the name of a frontend
+   (which must be `dockerfile.v0` or `gateway.v0`), but not both; a client-side
+   sketch of both flavors follows this list.
+2. Buildkitd receives the solve request with the Controller. The controller is
+ registered as the ControlServer gRPC service.
+3. The controller passes it down to the LLB solver, which will create a job for
+   this request. It will also create a FrontendLLBBridge, which provides a
+ solving interface over the job object.
+4. The request is processed:
+ - If the request is definition-based, it will simply build the definition.
+ - If the request is frontend-based, it will run the frontend over the
+ gateway while passing it a reference to the FrontendLLBBridge. Frontends
+ must return a result for the solve request, but they may also issue solve
+ requests themselves to the bridge.
+5. The results are plumbed back to the client, and the temporary job and bridge
+ are discarded.
+
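+For orientation, here is a minimal client-side sketch of both request flavors
+using the Go client. It is only an illustration: it assumes it runs inside a
+function returning an error, with `ctx` a `context.Context` and `c` a connected
+`*client.Client`, and the directories and attribute values are placeholders:
+
+```go
+// Definition-based solve: the client marshals an LLB definition itself.
+def, err := llb.Image("alpine").Marshal(ctx)
+if err != nil {
+  return err
+}
+if _, err := c.Solve(ctx, def, client.SolveOpt{}, nil); err != nil {
+  return err
+}
+
+// Frontend-based solve: no definition, only a frontend name and its attributes.
+if _, err := c.Solve(ctx, nil, client.SolveOpt{
+  Frontend: "dockerfile.v0",
+  FrontendAttrs: map[string]string{
+    "filename": "Dockerfile",
+  },
+  LocalDirs: map[string]string{
+    "context":    ".",
+    "dockerfile": ".",
+  },
+}, nil); err != nil {
+  return err
+}
+```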
+
+```mermaid
+sequenceDiagram
+ ControlClient ->> ControlServer : Solve
+ ControlServer ->> Solver : Solve
+
+ Solver ->> Job : Create job
+ activate Job
+
+ Solver ->> FrontendLLBBridge : Create bridge over Job
+ activate FrontendLLBBridge
+
+ Solver ->> FrontendLLBBridge : Solve
+
+ alt definition-based solve
+ FrontendLLBBridge ->> Job : Build
+ activate Job
+ Job -->> FrontendLLBBridge : Result
+ deactivate Job
+ else frontend-based solve
+ FrontendLLBBridge ->> Frontend : Solve
+ activate Frontend
+ note over FrontendLLBBridge, Frontend : Frontend must be either dockerfile.v0 or gateway.v0.
+
+ loop
+ Frontend ->> FrontendLLBBridge : Solve
+ FrontendLLBBridge ->> Job : Build
+ activate Job
+ note over FrontendLLBBridge, Frontend : Implementations may also call FrontendLLBBridge to solve graphs before returning the result.
+ Job -->> FrontendLLBBridge : Result
+ deactivate Job
+ FrontendLLBBridge -->> Frontend : Result
+ end
+
+ Frontend -->> FrontendLLBBridge : Result
+ deactivate Frontend
+ end
+
+ FrontendLLBBridge -->> Solver : Result
+ Solver ->> FrontendLLBBridge : Discard
+ deactivate FrontendLLBBridge
+
+ Solver ->> Job : Discard
+ deactivate Job
+
+ Solver -->> ControlServer : Result
+ ControlServer -->> ControlClient : Result
+```
+
+An important detail is that frontends may also issue solve requests, which are
+often definition-based solves, but can also be frontend-based solves, allowing
+for composability of frontends. Note that if a frontend makes a frontend-based
+solve request, both solves share the same FrontendLLBBridge and underlying job.
+
+## Dockerfile frontend (`dockerfile.v0`)
+
+Buildkit comes with a Dockerfile frontend which essentially is a parser that
+translates Dockerfile instructions into an LLB definition. In order to introduce
+new features into the Dockerfile DSL without breaking backwards compatibility,
+Dockerfiles can include a syntax directive at the top of the file to indicate a
+frontend image to use.
+
+For example, users can include a syntax directive to use
+`docker/dockerfile:1-labs` to opt in to an extended Dockerfile DSL that
+takes advantage of Buildkit features. However, the frontend image doesn’t have
+to be Dockerfile-specific. One can write a frontend that reads a YAML file, and
+using the syntax directive, issue the build request using `docker build -f
+my-config.yaml`.
+
+The lifecycle of a `dockerfile.v0` frontend-based solve request goes through
+the following:
+
+1. Starting from the "frontend-based solve" path, the bridge looks up the
+   Dockerfile frontend if the frontend key is `dockerfile.v0`, and sends a
+   solve request to the frontend. The gateway forwarder implements the frontend
+ interface and wraps over a BuildFunc that builds Dockerfiles.
+2. The BuildFunc issues a solve request to read the Dockerfile from a source
+   (local context, git, or HTTP) and parses it to find a syntax directive
+   (see the sketch below).
+ - If a syntax directive is found, it delegates the solve to the `gateway.v0`
+ frontend.
+ - If a syntax directive is not found, then it parses the Dockerfile
+ instructions and builds an LLB. The LLB is marshaled into a definition and
+ sent in a solve request.
+
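+As a rough illustration of the read step in item 2, the following
+BuildFunc-style sketch solves the local `dockerfile` source through the
+gateway client and reads the file so it can be parsed for a syntax directive.
+The import paths are the commonly used ones from the BuildKit repository; the
+real frontend also handles git and HTTP sources and more error cases.
+
+```go
+package example
+
+import (
+	"context"
+
+	"github.com/moby/buildkit/client/llb"
+	gwclient "github.com/moby/buildkit/frontend/gateway/client"
+)
+
+// readDockerfile mirrors step 2: solve the "dockerfile" local source to get a
+// reference to its snapshot, then read the Dockerfile contents from it.
+func readDockerfile(ctx context.Context, c gwclient.Client) ([]byte, error) {
+	st := llb.Local("dockerfile")
+	def, err := st.Marshal(ctx)
+	if err != nil {
+		return nil, err
+	}
+	res, err := c.Solve(ctx, gwclient.SolveRequest{Definition: def.ToPB()})
+	if err != nil {
+		return nil, err
+	}
+	ref, err := res.SingleRef()
+	if err != nil {
+		return nil, err
+	}
+	return ref.ReadFile(ctx, gwclient.ReadRequest{Filename: "Dockerfile"})
+}
+```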
+
+```mermaid
+sequenceDiagram
+ participant Job
+ participant FrontendLLBBridge
+
+ # FIXME: use boxes with https://github.com/mermaid-js/mermaid/issues/1505
+ # box "Dockerfile frontend"
+ participant Frontend as Gateway Forwarder
+ participant BuildFunc
+ # end box
+
+ # FIXME: use incoming messages with https://github.com/mermaid-js/mermaid/issues/1357
+ Job ->> FrontendLLBBridge : Solve
+ FrontendLLBBridge ->> Frontend : Solve
+
+ Frontend ->> BuildFunc : Call
+ activate BuildFunc
+
+ BuildFunc ->> FrontendLLBBridge : Solve
+ FrontendLLBBridge ->> Job : Build
+ activate Job
+ note over Frontend : Solve to read Dockerfile
+ Job -->> FrontendLLBBridge : Result
+ deactivate Job
+ FrontendLLBBridge -->> BuildFunc : Result
+
+ alt Dockerfile has syntax directive
+ BuildFunc ->> FrontendLLBBridge : Solve
+ activate FrontendLLBBridge #FFBBBB
+ note over Frontend : Dockerfile delegates to gateway.v0
+ FrontendLLBBridge -->> BuildFunc : Result
+ deactivate FrontendLLBBridge
+ else Dockerfile has no syntax directive
+ BuildFunc ->> FrontendLLBBridge : Solve
+ FrontendLLBBridge ->> Job : Build
+ activate Job
+ note over Frontend : Solved by Dockerfile2LLB
+ Job -->> FrontendLLBBridge : Result
+ deactivate Job
+ FrontendLLBBridge -->> BuildFunc : Result
+ end
+
+ BuildFunc -->> Frontend : Return
+ deactivate BuildFunc
+
+ Frontend -->> FrontendLLBBridge : Result
+ FrontendLLBBridge -->> Job : Result
+```
+
+## Gateway frontend (`gateway.v0`)
+
+The gateway frontend allows external frontends to be implemented as container
+images, enabling a pluggable architecture. The container images have access to
+the gRPC service through stdin/stdout. The easiest way to implement a frontend
+image is to create a golang binary that vendors buildkit, because it provides
+convenient LLB builders and utilities.
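+
+A minimal sketch of such a binary is shown below. The import paths are the
+ones commonly used by frontends in the BuildKit repository and may differ
+between versions; the build steps are illustrative only.
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/moby/buildkit/client/llb"
+	gwclient "github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/frontend/gateway/grpcclient"
+	"github.com/moby/buildkit/util/appcontext"
+)
+
+func main() {
+	// Serve the BuildFunc over the gRPC connection exposed on stdio.
+	if err := grpcclient.RunFromEnvironment(appcontext.Context(), build); err != nil {
+		panic(err)
+	}
+}
+
+func build(ctx context.Context, c gwclient.Client) (*gwclient.Result, error) {
+	// Construct an LLB definition and send it back to the bridge as a
+	// definition-based solve request.
+	st := llb.Image("docker.io/library/alpine:latest").
+		Run(llb.Shlex("echo hello from a frontend")).Root()
+	def, err := st.Marshal(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return c.Solve(ctx, gwclient.SolveRequest{Definition: def.ToPB()})
+}
+```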
+
+The lifecycle of a `gateway.v0` frontend-based solve request goes through the
+following:
+
+1. Starting from the "frontend-based solve" path, the bridge looks up the
+   Gateway frontend if the frontend key is `gateway.v0`, and requests a solve
+   from the frontend.
+2. The gateway frontend resolves a frontend image from the `source` key and
+   solves the request to retrieve the rootfs for that image (see the sketch
+   after this list).
+3. A temporary gRPC server is created that forwards requests to the LLB bridge.
+4. A container using the frontend image rootfs is created, and a gRPC
+ connection is established from a process inside the container to the
+ temporary bridge forwarder.
+5. The frontend image is then able to build LLBs and send solve requests
+ through the forwarder.
+6. The container exits, and then the results are plumbed back to the LLB
+ bridge, which plumbs them back to the client.
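+
+From the client's side, this path is selected by naming `gateway.v0` as the
+frontend and pointing the `source` attribute at a frontend image. The sketch
+below assumes the Go client import path and daemon address, and uses
+`docker/dockerfile` purely as an illustrative frontend image.
+
+```go
+package main
+
+import (
+	"context"
+
+	bkclient "github.com/moby/buildkit/client"
+)
+
+func main() {
+	ctx := context.Background()
+	c, err := bkclient.New(ctx, "unix:///run/buildkit/buildkitd.sock")
+	if err != nil {
+		panic(err)
+	}
+	defer c.Close()
+
+	// Drain progress events so the solve is not blocked on the channel.
+	ch := make(chan *bkclient.SolveStatus)
+	go func() {
+		for range ch {
+		}
+	}()
+
+	// gateway.v0 resolves the image named in "source", runs it as the
+	// frontend, and forwards its solve requests to the LLB bridge.
+	if _, err := c.Solve(ctx, nil, bkclient.SolveOpt{
+		Frontend: "gateway.v0",
+		FrontendAttrs: map[string]string{
+			"source": "docker/dockerfile", // illustrative frontend image
+		},
+		LocalDirs: map[string]string{
+			"context":    ".",
+			"dockerfile": ".",
+		},
+	}, ch); err != nil {
+		panic(err)
+	}
+}
+```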
+
+
+```mermaid
+sequenceDiagram
+ participant Job
+ participant FrontendLLBBridge
+ participant Frontend as Gateway frontend
+ participant Worker
+ participant LLBBridgeForwarder
+ participant Executor
+ participant Container as Frontend Container
+
+ Job ->> FrontendLLBBridge : Solve
+ FrontendLLBBridge ->> Frontend : Solve
+ Frontend ->> Worker : ResolveImageConfig
+ activate Worker
+ Worker -->> Frontend : Digest
+ deactivate Worker
+ Frontend ->> FrontendLLBBridge : Solve
+
+ FrontendLLBBridge ->> Job : Build
+ activate Job
+ note over FrontendLLBBridge, Frontend : The frontend image specified by build option "source" is solved and the rootfs of that image is then used to run the container.
+ Job -->> FrontendLLBBridge : Result
+ deactivate Job
+
+ FrontendLLBBridge -->> Frontend : Result
+
+ note over LLBBridgeForwarder, Executor : A temporary gRPC server is created that listens on stdio of frontend container. Requests are then forwarded to LLB bridge.
+ Frontend ->> LLBBridgeForwarder : Create forwarder
+ activate LLBBridgeForwarder
+
+ Frontend ->> FrontendLLBBridge : Exec
+ FrontendLLBBridge ->> Worker : Exec
+ Worker ->> Executor : Exec
+
+ Executor ->> Container : Create container task
+ activate Container #MediumSlateBlue
+
+ rect rgba(100, 100, 100, .1)
+ note over Executor, Container : Frontend images may request definition/frontend-based solves like any other client.
+ loop
+ Container ->> LLBBridgeForwarder : Solve
+ LLBBridgeForwarder ->> FrontendLLBBridge : Solve
+ activate FrontendLLBBridge #FFBBBB
+ FrontendLLBBridge -->> LLBBridgeForwarder : Result
+ deactivate FrontendLLBBridge
+ LLBBridgeForwarder -->> Container : Result
+ end
+ end
+
+ Container -->> Executor : Exit
+ deactivate Container
+
+ Executor -->> Worker : Exit
+ Worker -->> FrontendLLBBridge : Exit
+ FrontendLLBBridge -->> Frontend : Exit
+ Frontend ->> LLBBridgeForwarder : Discard
+ deactivate LLBBridgeForwarder
+
+ Frontend -->> FrontendLLBBridge : Result
+ FrontendLLBBridge -->> Job : Result
+```
diff --git a/docs/dev/solver.md b/docs/dev/solver.md
new file mode 100644
index 000000000000..db8b9e146d13
--- /dev/null
+++ b/docs/dev/solver.md
@@ -0,0 +1,325 @@
+# Buildkit solver design
+
+The solver is a component in BuildKit responsible for parsing the build
+definition and scheduling the operations to the workers for execution.
+
+The solver package is heavily optimized for deduplication of work, concurrent
+requests, remote and local caching, and different per-vertex caching modes. It
+also allows operations and frontends to call back into the solver with new
+definitions that they have generated.
+
+The implementation of the solver is quite complicated, mostly because it needs
+to be performant with a snapshot-based storage layer and a distribution model
+based on layer tarballs. It is expected that calculating the content-based
+checksum of snapshots between every operation or after every command execution
+is too slow for common use cases and needs to be postponed until it is likely
+to have a meaningful impact. Ideally, the user shouldn't realize that these
+optimizations are taking place and should just get intuitive caching. It is
+also hoped that if some implementations can provide better cache capabilities,
+the solver can take advantage of them without requiring significant
+modification.
+
+In addition to avoiding content checksum scanning, the implementation is also
+designed to make decisions with minimal available data. For example, for
+remote cache sources to be effective, the solver does not require the cache to
+be loaded or to exist for all the vertexes in the graph, but only loads it for
+the final node that is determined to be a cache match. As another example, if
+one of the inputs (for example, an image) can produce a definition-based cache
+match for a vertex, and another (for example, local source files) can only
+produce a content-based (slower) cache match, the solver is designed to detect
+this and skip the content-based check for the first input (which would
+otherwise cause a pull to happen).
+
+## Build definition
+
+The solver takes in a build definition in the form of a content-addressable
+operation definition that forms a graph.
+
+A vertex in this graph is defined by these properties:
+
+```go
+type Vertex interface {
+ Digest() digest.Digest
+ Options() VertexOptions
+ Sys() interface{}
+ Inputs() []Edge
+ Name() string
+}
+
+type Edge struct {
+ Index Index
+ Vertex Vertex
+}
+
+type Index int
+```
+
+Every vertex has a content-addressable digest that represents a checksum of the
+definition graph up to that vertex including all of its inputs. If two vertexes
+have the same checksum, they are considered identical when they are executing
+concurrently. That means that if two other vertexes request a vertex with the
+same digest as an input, they will wait for the same operation to finish.
+
+The vertex digest can only be used for comparison while the solver is running,
+not between different invocations. For example, if parallel builds require
+using the `docker.io/library/alpine:latest` image as one of the operations, it
+is pulled only once. But if a build using `docker.io/library/alpine:latest`
+was run earlier, the checksum based on that name can't be used to determine
+whether the vertex was already built, because the image might have changed in
+the registry and the "latest" tag might now point to another image.
+
+The `Sys()` method returns an object that is used to resolve the executor for
+the operation. This is how a definition can pass logic to the worker that will
+execute the task associated with the vertex, without the solver needing to
+know anything about the implementation. When the solver needs to execute a
+vertex, it sends this object to a worker, so the worker needs to be configured
+to understand the object returned by `Sys()`. The solver itself doesn't care
+how the operations are implemented and therefore doesn't define a type for
+this value. In the LLB solver this value has the type `llb.Op`.
+
+`Inputs()` returns an array of other vertexes the current vertex depends on. A
+vertex may have zero inputs. After an operation has executed, it returns an
+array of return references. If another operation wants to depend on any of
+these references, it defines an input with that vertex and the index of the
+reference in the return array (starting from zero). Inputs need to be
+contained in the `Digest()` of the vertex - two vertexes with different inputs
+should never have the same digest.
+
+Options contain extra information that can be associated with the vertex but
+that doesn't change the definition (or equality check) of it. Normally this is
+a hint to the solver, for example, to ignore the cache when executing. Options
+can also be used to associate messages with the vertex that can be helpful for
+tracing purposes.
+
+## Operation interface
+
+The operation interface is how the solver evaluates the properties of the
+actual vertex operation. These methods run on the worker, and their
+implementation is determined by the value of `vertex.Sys()`. The solver is
+configured with a "resolve" function that can convert a `vertex.Sys()` into an
+`Op`.
+
+```go
+// Op is an implementation for running a vertex
+type Op interface {
+ // CacheMap returns structure describing how the operation is cached.
+ // Currently only roots are allowed to return multiple cache maps per op.
+ CacheMap(context.Context, int) (*CacheMap, bool, error)
+ // Exec runs an operation given results from previous operations.
+ // Note that this is not the process execution but can have any definition.
+ Exec(ctx context.Context, inputs []Result) (outputs []Result, err error)
+}
+
+type CacheMap struct {
+ // Digest is a base digest for operation that needs to be combined with
+ // inputs cache or selectors for dependencies.
+ Digest digest.Digest
+ Deps []struct {
+ // Optional digest that is merged with the cache key of the input
+ Selector digest.Digest
+ // Optional function that returns a digest for the input based on its
+ // return value
+ ComputeDigestFunc ResultBasedCacheFunc
+ }
+}
+
+type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error)
+
+
+// Result is an abstract return value for a solve
+type Result interface {
+ ID() string
+ Release(context.Context) error
+ Sys() interface{}
+}
+```
+
+Every operation defines two functions. One describes how to calculate a cache
+key for a vertex and the other how to execute it.
+
+`CacheMap` is a description for calculating the cache key. It contains a digest
+that is combined with the cache keys of the inputs to determine the stable
+checksum that can be used to cache the operation result. For the vertexes that
+don't have inputs (roots), it is important that this digest is a stable,
+secure checksum. For example, in LLB this digest is a manifest digest for
+container images or a commit SHA for git sources.
+
+`CacheMap` may also define optional selectors or content-based cache functions
+for its inputs. A selector is combined with the input cache key and is useful
+for describing cases where different parts of an input are used and the
+input's cache key needs to be customized. A content-based cache function
+allows computing a new cache key for an input after it has completed. In LLB
+this is used for calculating a cache key based on the checksum of the file
+contents of the input snapshots.
+
+`Exec` executes the operation defined by a vertex by passing in the results of
+the inputs.
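+
+To make this concrete, here is a toy `Op` written against the simplified
+interface above. It is only a sketch: the real solver package differs in
+detail, and the `Op`, `CacheMap` and `Result` declarations shown earlier are
+assumed to be in the same package.
+
+```go
+package solversketch
+
+import (
+	"context"
+	"errors"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// gitSource is a toy Op for a root vertex (no inputs): its cache key is
+// derived from a resolved commit SHA.
+type gitSource struct {
+	commit string // resolved commit SHA
+}
+
+// CacheMap: a root vertex needs a stable, secure checksum, and the resolved
+// commit serves that purpose. The returned bool signals that the cache map is
+// complete and will not change.
+func (s *gitSource) CacheMap(ctx context.Context, index int) (*CacheMap, bool, error) {
+	return &CacheMap{Digest: digest.FromString("git-source:" + s.commit)}, true, nil
+}
+
+// Exec would check out the commit on the worker and wrap the resulting
+// snapshot in a Result; the snapshot handling is out of scope for this sketch.
+func (s *gitSource) Exec(ctx context.Context, inputs []Result) ([]Result, error) {
+	return nil, errors.New("not implemented in this sketch")
+}
+```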
+
+## Shared graph
+
+After a new build request is sent to the solver, it first loads all the
+vertexes into the shared graph structure. For status tracking, a job instance
+needs to be created, and vertexes are loaded through jobs. A job ID is
+assigned to every vertex. If a vertex with the same digest has already been
+loaded to the shared graph, a new job ID is appended to the existing record.
+When the job finishes, it removes all of its references from the loaded
+vertex. The resources are released if no more references remain.
+
+Loading a vertex also creates a progress writer associated with it and sets up
+the cache sources associated with the specific vertex.
+
+After vertexes have been loaded into the job, it is safe to request a result
+from an edge pointing to a previously loaded vertex. To do this, the
+`build(ctx, Edge) (CachedResult, error)` method is called on the static
+scheduler instance associated with the solver.
+
+## Scheduler
+
+The scheduler is a component responsible for invoking the individual
+operations needed to find the result for the graph. While the build definition
+is defined with vertexes, the scheduler solves edges. In the case of the LLB
+solver, the result of a solved edge is associated with a snapshot. Usually, to
+solve an edge, the input edges need to be solved first, and this can be done
+concurrently, but there are many exceptions: an edge may be cached while its
+input is not, or solving one input might produce a cache hit that makes
+solving the others wasteful. The scheduler tries to handle all these cases.
+
+The scheduler is implemented as a single-threaded non-blocking event loop. The
+single-threaded constraint is for simplicity and might be removed in the
+future - currently, it is not known if this would have any performance impact.
+All the events in the scheduler have one fixed sender and receiver. The
+interface for interacting with the scheduler is to create a "pipe" between a
+sender and a receiver. One or both sides of the pipe may be an edge instance
+of the graph. If a pipe is added to the scheduler and an edge receives an
+event from the pipe, the scheduler will "unpark" that edge so it can process
+all the events it has received.
+
+The unpark handler for an edge needs to be non-blocking and execute quickly.
+The edge will process the data from the incoming events and update its internal
+state. When calling unpark, the scheduler has already separated out the sender
+and receiver sides of the pipes, which the code refers to as incoming and
+outgoing requests. The incoming requests are usually requests to retrieve a
+result or a cache key from an edge. If it appears that an edge doesn't have
+enough internal state to satisfy the requests, it can make new pipes and
+register them with the scheduler. These new pipes are generally of two types:
+ones asking for some async function to be completed and others that request an
+input edge to reach a specific state first.
+
+To avoid bugs and deadlocks in this logic, the unpark method needs to follow
+these rules: if unpark has finished without completing all incoming requests,
+it needs to create outgoing requests; similarly, if an incoming request
+remains pending, at least one outgoing request needs to exist as well. Failing
+to comply with these rules causes the scheduler to panic as a precaution to
+avoid leaks and hidden errors.
+
+## Edge state
+
+During unpark, edge state is incremented until it can fulfill the incoming
+requests.
+
+An edge can be in the following states: initial, cache-fast, cache-slow,
+completed. A completed edge contains a reference to the final result; an
+in-progress edge may have zero or more cache keys.
+
+The initial state is the starting state for any edge. If an edge has reached
+the cache-fast state, it means that all the definition-based cache key lookups
+have been performed. Cache-slow means that the content-based cache lookup has
+been performed as well. If possible, the scheduler will avoid looking up the
+slow keys of inputs if they are unnecessary for solving the current edge.
+
+The unpark method is split into four phases. The first phase processes all
+incoming events (responses from outgoing requests or new incoming requests)
+that caused the unpark to be called. These contain responses from async
+functions like calls to get the cachemap, an execution result or a
+content-based checksum for an input, as well as responses from input edges
+when their state or number of cache keys has changed. All the results are
+stored in the edge's internal state. For the new cache keys, a query is
+performed to determine if any of them can produce potential matches for the
+current edge.
+
+After that, if any of the updates caused changes to the edge's properties, a
+new state is calculated for the current vertex. In this step, all potential
+cache keys from inputs can cause new cache keys for the edge to be created,
+and the status of the edge might be updated.
+
+Third, the edge goes over all of its incoming requests to determine if the
+current internal state is sufficient for satisfying them all. There are a few
+ways this check may end up. If all requests can be completed and there are no
+outgoing requests, the requests finish and the unpark method returns. If there
+are outgoing requests but the edge has reached the completed state or all
+incoming requests have been canceled, the outgoing requests are canceled. This
+is an async operation as well and will cause unpark to be called again after
+completion. If this condition didn't apply but the requests could be completed
+and there are outgoing requests, the incoming request is answered but not
+completed. The receiver can then decide to cancel the request if needed. If no
+new data has appeared to answer the incoming requests, the desired state for
+the edge is determined from the incoming requests, and we continue to the next
+step.
+
+The fourth step sets up outgoing requests based on the desired state
+determined in the third step. If the current state requires calling any async
+functions to move forward, that is done here. We also loop through all the
+inputs to determine if it is important to raise their desired state. Depending
+on which inputs can produce content-based cache keys and which inputs have
+already returned possible cache matches, the desired state for inputs may be
+raised at different times.
+
+When an edge needs to resolve an operation to call the async `CacheMap` and
+`Exec` methods, it does so by calling back to the shared graph. This makes
+sure that two different edges pointing to the same vertex do not execute the
+operation twice. The result values for the operation shared by the edges are
+also cached until the vertex is cleaned up. Progress reporting is also handled
+and forwarded to the job through this shared vertex instance.
+
+Edge state is cleaned up when the last job that loaded the vertexes it is
+connected to is discarded.
+
+## Cache providers
+
+Cache providers determine if there is a result that matches the cache keys
+generated during the build that could be reused instead of fully reevaluating
+the vertex and its inputs. There can be multiple cache providers, and specific
+providers can be defined per vertex using the vertex options.
+
+There are multiple backend implementations for cache providers: an in-memory
+one used in unit tests, the default local one backed by bbolt, and one based
+on cache manifests in a remote registry.
+
+A simplified cache provider has the following methods:
+
+```go
+Query(...) ([]*CacheKey, error)
+Records(ck *CacheKey) ([]*CacheRecord, error)
+Load(ctx context.Context, rec *CacheRecord) (Result, error)
+Save(key *CacheKey, s Result) (*ExportableCacheKey, error)
+```
+
+The Query method is used to determine if there exists a possible cache link
+between an input and a vertex. It takes parameters provided by `op.CacheMap`
+and the cache keys returned by calling the same method on its inputs.
+
+If a cache key has been found, the matching records can be requested for it. A
+cache key can have zero or more records. Having a record means that a cached
+result can be loaded for a specific vertex. The solver supports partial cache
+chains, meaning that not all inputs need to have a cache record for a vertex
+to match the cache.
+
+The Load method is used to load a specific record into a result reference.
+This value is the same type as the one returned by the `op.Exec` method.
+
+The Save method allows adding more records to the cache.
+
+## Merging edges
+
+One final piece of solver logic allows merging two edges into one when they
+have both returned the same cache key. In practice, this happens, for example,
+when a build uses the image references `alpine:latest` and `alpine@sha256:abcabc`
+in its definition and they actually point to the same image. Another case
+where this appears is when the same source files from different sources are
+used as part of the build.
+
+After the scheduler has called `unpark()` on an edge, it checks whether the
+method added any new cache keys to the edge's state. If it did, it checks its
+internal index to see whether another active edge already exists with the same
+cache key. If one does, it performs some basic validation, for example
+checking that the new edge has not explicitly asked for the cache to be
+ignored, and if that passes, it merges the states of the two edges.
+
+As a result of the merge, the edge that was checked is deleted, its ongoing
+requests are canceled, and the incoming ones are added to the original edge.
diff --git a/docs/experimental-syntaxes.md b/docs/experimental-syntaxes.md
index 138050f94919..6dbb48754ec0 100644
--- a/docs/experimental-syntaxes.md
+++ b/docs/experimental-syntaxes.md
@@ -1,4 +1,3 @@
# Dockerfile frontend syntaxes
-Documentation for Dockerfile syntaxes can be found in the
-[Dockerfile frontend documentation](/frontend/dockerfile/docs/syntax.md)
+This page has moved to [Dockerfile reference documentation](/frontend/dockerfile/docs/reference.md)
diff --git a/docs/images-readme.md b/docs/images-readme.md
index 8959f13abcd0..0ac33f3367e6 100644
--- a/docs/images-readme.md
+++ b/docs/images-readme.md
@@ -4,15 +4,15 @@ BuildKit is a concurrent, cache-efficient, and Dockerfile-agnostic builder toolk
Report issues at https://github.com/moby/buildkit
-Join `#buildkit` channel on [Docker Community Slack](http://dockr.ly/slack)
+Join `#buildkit` channel on [Docker Community Slack](https://dockr.ly/comm-slack)
# Tags
### Latest stable release
-- [`v0.9.0`, `latest`](https://github.com/moby/buildkit/blob/v0.9.0/Dockerfile)
+- [`v0.10.0`, `latest`](https://github.com/moby/buildkit/blob/v0.10.0/Dockerfile)
-- [`v0.9.0-rootless`, `rootless`](https://github.com/moby/buildkit/blob/v0.9.0/Dockerfile) (see [`docs/rootless.md`](https://github.com/moby/buildkit/blob/master/docs/rootless.md) for usage)
+- [`v0.10.0-rootless`, `rootless`](https://github.com/moby/buildkit/blob/v0.10.0/Dockerfile) (see [`docs/rootless.md`](https://github.com/moby/buildkit/blob/master/docs/rootless.md) for usage)
### Development build from master branch
diff --git a/docs/merge+diff.md b/docs/merge+diff.md
deleted file mode 100644
index 09322f332b58..000000000000
--- a/docs/merge+diff.md
+++ /dev/null
@@ -1,418 +0,0 @@
-# Merge and Diff Ops
-MergeOp and DiffOp are two interrelated LLB operations that enable the rebasing of LLB results onto other results and the separation of LLB results from their base, respectively. Underneath the hood, these ops enable fine grain manipulation of container layer chains that can result in highly efficient operations for many use cases.
-
-This doc assumes some familiarity with LLB and ops like ExecOp and FileOp. More background on LLB can be obtained from the README.md in Buildkit's git repository. This doc also uses the Go LLB client for examples, though MergeOp and DiffOp are not in any way language specific.
-
-## MergeOp
-MergeOp has a very simple interface:
-```go
-func Merge(inputs []llb.State) llb.State
-```
-
-The intuition is that it merges the contents of the provided states together into one state (hence the name), with files from later states taking precedence over those from earlier ones.
-
-To be more concrete, MergeOp returns a state where each of the input states are rebased on top of each other in the order provided. "Rebasing" a state `B` onto another state `A` creates a state that:
-* Has all the contents of `B`
-* Has all the contents of `A` except when a path exists in both `B` and `A`. In this case:
- * If both paths are directories, their contents are merged. Metadata (such as permissions) on the directory from `B` take precedence.
- * If one of the paths is not a directory, whatever is present in `B` takes precedence. This also means that if a file in `B` overwrites a dir in `A`, then all files/dirs in the tree under at that path in `A` are also removed.
-
-MergeOp is associative, i.e. using shorthand notation: `Merge(A, B, C) == Merge(Merge(A, B), C) == Merge(A, Merge(B, C))`. Buildkit knows this and internally optimizes LLB merges that are equivalent in this way to re-use the same cache entries.
-
-There are more subtleties to the behavior of MergeOp, such as when deletions are present in a layer making up a state, discussed in the "Advanced Details" section of this doc.
-
-States created by MergeOp are the same as any other LLB states in that they can be used as the base for exec, be mounted to arbitrary paths in execs, be plugged into other merges and diffs, be exported, etc.
-
-As a very simple example:
-```go
-// a has /dir/a
-a := llb.Scratch().
- File(llb.Mkdir("/dir", 0755)).
- File(llb.Mkfile("/dir/a", 0644, []byte("a")))
-
-// b has /dir/b and /otherdir
-b := llb.Scratch().
- File(llb.Mkdir("/dir", 0755)).
- File(llb.Mkfile("/dir/b", 0644, []byte("b"))).
- File(llb.Mkdir("/otherdir", 0755))
-
-// c has /dir/a and /dir/c
-c := llb.Scratch().
- File(llb.Mkdir("/dir", 0700)).
- File(llb.Mkfile("/dir/a", 0644, []byte("overwritten"))).
- File(llb.Mkfile("/dir/c", 0644, []byte("c")))
-
-// merged will consist of /dir/a, /dir/b, /dir/c and /otherdir.
-// The file at /dir/a will have contents set to "overwritten" because c is merged after a.
-// /dir will have permissions set to 0700 for the same reason.
-merged := llb.Merge([]llb.State{a, b, c})
-
-// merged can be used as the base for new states
-mergedPlusMore := merged.File(llb.Mkdir("/yetanotherdir", 0755))
-// or as the input to other merges
-mergedPlusMore = llb.Merge([]llb.State{merged, llb.Scratch().File(llb.Mkdir("/yetanotherdir", 0755))})
-```
-
-### Container Image Export
-When the result of a MergeOp is exported as a container image, the image will consist of the layers making up each input joined together in the order of the MergeOp. If Buildkit has cached any one of these layers already, they will not need to be re-exported (i.e. re-packaged into compressed tarballs). Additionally, if the image is being pushed to a registry and the registry indicates it already has any of the layers, then Buildkit can skip pushing those layers entirely.
-
-Layers joined together by MergeOp do not have dependencies on each other, so a cache invalidation of the layers of one input doesn't cascade to the layers of the other inputs.
-
-## DiffOp
-DiffOp also has a very simple interface:
-```go
-func Diff(lower llb.State, upper llb.State) llb.State
-```
-
-The intuition is that it returns a state whose contents are the difference between `lower` and `upper`. It can be viewed as something like the inverse of MergeOp; whereas MergeOp "adds" states together, DiffOp "subtracts" `lower` from `upper` (in a manner of speaking).
-
-More specifically, DiffOp returns a state that has the contents present in `upper` that either aren't present in `lower` or have changed from `lower` to `upper`. Another way of thinking about it is that if you start at `A` and apply `Diff(A, B)`, you will end up at `B`. Or, even more succinctly, `Merge(A, Diff(A, B)) == B`.
-
-Files and dirs are considered to have changed between `lower` and `upper` if their contents are unequal or if metadata like permissions and `mtime` have changed. Unequal `atime` or `ctime` values are not considered to be a change.
-
-There are more subtleties to the behavior of DiffOp discussed in the "Advanced Details" section of this doc.
-
-States created by DiffOp are the same as any other LLB states in that they can be used as the base for exec, be mounted to arbitrary paths in execs, be plugged into merges and other diffs, be exported, etc.
-
-As a very simple example:
-```go
-base := llb.Image("alpine")
-basePlusBuilt := base.Run(llb.Shlex("touch /foo")).Root()
-// diffed consists of just the file /foo, nothing in the alpine image is present
-diffed := llb.Diff(base, basePlusBuilt)
-```
-
-### Container Image Export
-When the result of a DiffOp is exported as a container image, layers will be re-used as much as possible. To explain, consider this case:
-```go
-lower := llb.Image("alpine")
-middle := lower.Run(llb.Shlex("touch /foo")).Root()
-upper := middle.Run(llb.Shlex("touch /bar")).Root()
-diff := llb.Diff(lower, upper)
-```
-
-In this case, there is a "known chain" from `lower` to `upper` because `lower` is a state in `upper`'s history. This means that when the DiffOp is exported as a container image, it can just consist of the container layers for `middle` joined with the container layers for `upper`.
-
-Another way of thinking about this is that when `lower` is a state in `upper`'s history, the diff between the two is equivalent to a merge of the states between them. So, using the example above:
-```go
-llb.Diff(lower, upper) == llb.Merge([]llb.State{
- llb.Diff(lower, middle),
- llb.Diff(middle, upper),
-})
-````
-This behavior extends to arbitrary numbers of states separating `lower` and `upper`.
-
-In the case where there is not a chain between `lower` and `upper` that Buildkit can determine, DiffOp still works consistently but, when exported, will always result in a single layer that is not re-used from its inputs.
-
-## Example Use Case: Better "Copy Chains" with MergeOp
-### The Problem
-A common pattern when building container images is to independently assemble components of the image and then combine those components together into a final image using a chain of Copy FileOps. For example, when using the Dockerfile frontend, this is the multi-stage build pattern and a chain of `COPY --from=...` statements.
-
-One issue with this type of pattern is that if any of the inputs to the copy chain change, that doesn't just invalidate Buildkit's cache for that input, it also invalidates Buildkit's cache for any copied layers after that one.
-
-To be a bit more concrete, consider the following LLB as specified with the Go client:
-```go
-// stage a
-a := llb.Image("alpine").Run("build a").Root()
-// stage b
-b := llb.Image("alpine").Run("build b").Root()
-// stage c
-c := llb.Image("alpine").Run("build c").Root()
-
-// final combined stage
-combined := llb.Image("alpine").
- File(llb.Copy(a, "/bin/a", "/usr/local/bin/a")).
- File(llb.Copy(b, "/bin/b", "/usr/local/bin/b")).
- File(llb.Copy(c, "/bin/c", "/usr/local/bin/c"))
-```
-
-Note that this is basically the equivalent of the following Dockerfile:
-```dockerfile
-FROM alpine as a
-RUN build a
-
-FROM alpine as b
-RUN build b
-
-FROM alpine as c
-RUN build c
-
-FROM alpine as combined
-COPY --from=a /bin/a /usr/local/bin/a
-COPY --from=b /bin/b /usr/local/bin/b
-COPY --from=c /bin/c /usr/local/bin/c
-```
-
-Now, say you do a build of this LLB and export the `combined` stage as a container image to a registry. If you were to then repeat the same build with the same instance of Buildkit, each part of the build should be cached, resulting in no work needing to be done and no layers needing to be exported or pushed to the registry.
-
-Then, say you later do the build again but this time with a change to `a`. The build for `a` is thus not cached, which means that the copy of `/bin/a` into `/usr/local/bin/a` of `combined` is also not cached and has to be re-run. The problem is that because each copy in to `combined` is chained together, the invalidation of the copy from `a` also cascades to its descendants, namely the copies from `b` and `c`. This is despite the fact that `b` and `c` are independent of `a` and thus don't need to be invalidated. In graphical form:
-```mermaid
-graph TD
- alpine("alpine") --> |CACHE HIT fa:fa-check| A("build a2.0")
- alpine -->|CACHE HIT fa:fa-check| B("build b")
- alpine -->|CACHE HIT fa:fa-check| C("build c")
-
- A --> |CACHE MISS fa:fa-ban| ACopy(/usr/local/bin/a)
- busybox("busybox") -->|CACHE HIT fa:fa-check| ACopy
- B -->|CACHE HIT fa:fa-check| BCopy(/usr/local/bin/b)
- ACopy -->|CACHE MISS fa:fa-ban| BCopy
- C -->|CACHE HIT fa:fa-check| CCopy(/usr/local/bin/c)
- BCopy -->|CACHE MISS fa:fa-ban| CCopy
-
- classDef green fill:#5aa43a,stroke:#333,stroke-width:2px;
- class alpine,B,C,busybox green
- classDef red fill:#c72020,stroke:#333,stroke-width:2px;
- class A,ACopy,BCopy,CCopy red
-```
-
-As a result, not only do the copies from `b` and `c` to create `/usr/local/bin/b` and `/usr/local/bin/c` need to run again, they also result in new layers needing to be exported and then pushed to a registry. For many use cases, this becomes a significant source of overhead in terms of build times and the amount of data that needs to be stored and transferred.
-
-### The Solution
-MergeOp can be used to fix the problem of cascading invalidation in copy chains:
-```go
-a := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run("build a").Root(), "/bin/a", "/usr/local/bin/a"))
-b := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run("build b").Root(), "/bin/b", "/usr/local/bin/b"))
-c := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run("build c").Root(), "/bin/c", "/usr/local/bin/c"))
-combined := llb.Merge([]llb.State{
- llb.Image("busybox"),
- a,
- b,
- c,
-})
-```
-
-(*Note that newer versions of Dockerfiles support a `--link` flag when using `COPY`, which results in basically this same pattern*)
-
-Two changes have been made from the previous version:
-1. `a`, `b`, and `c` have been updated to copy their desired contents to `Scratch` (a new, empty state).
-1. `combined` is defined as a MergeOp of the states desired in the final image.
-
-Say you're doing this build for the first time. The build will first create states `a`, `b`, and `c`, resulting in each being a single layer consisting only of contents `/usr/local/bin/a`, `/usr/local/bin/b`, and `/usr/local/bin/c` respectively. Then, the MergeOp rebases each of those states on to the base `busybox` image. As discussed earlier, the container image export of a MergeOp will consist of the layers of the merge inputs joined together, so the final image looks mostly the same as before.
-
-The benefits of MergeOp become apparent when considering what happens if the build of `a` is modified. Whereas before this led to invalidation of the copy of `b` and `c`, now those merge inputs are completely unaffected; no new cache entries or new container layers need to be created for them. So, the end result is that the only work Buildkit does when `a` changes is re-build `a` and then push the new layers for `/usr/local/bin/a` (plus a new image manifest). `/usr/local/bin/b` and `/usr/local/bin/c` do not need to be re-exported and do not need to be re-pushed to the registry. In graphical form:
-```mermaid
-graph TD
- alpine("alpine") --> |CACHE HIT fa:fa-check| A("build a2.0")
- alpine -->|CACHE HIT fa:fa-check| B("build b")
- alpine -->|CACHE HIT fa:fa-check| C("build c")
-
- busybox("busybox") -->|CACHE HIT fa:fa-check| Merge("Merge (lazy)")
- A --> |CACHE MISS fa:fa-ban| ACopy(/usr/local/bin/a)
- ACopy -->|CACHE MISS fa:fa-ban| Merge
- B -->|CACHE HIT fa:fa-check| BCopy(/usr/local/bin/b)
- BCopy -->|CACHE HIT fa:fa-check| Merge
- C -->|CACHE HIT fa:fa-check| CCopy(/usr/local/bin/c)
- CCopy -->|CACHE HIT fa:fa-check| Merge
-
- classDef green fill:#5aa43a,stroke:#333,stroke-width:2px;
- class alpine,B,BCopy,C,CCopy,busybox green
- classDef red fill:#c72020,stroke:#333,stroke-width:2px;
- class A,ACopy red
-```
-
-An important aspect of this behavior is that MergeOp is implemented lazily, which means that its on-disk filesystem representation is only created locally when strictly required. This means that even though a change to `a` invalidates the MergeOp as a whole, no work needs to be done to create the merged state on-disk when it's only being exported as a container image. This laziness behavior is discussed more in the "Performance Considerations" section of the doc.
-
-You can see a working-code example of this by comparing `examples/buildkit3` with `examples/buildkit4` in the Buildkit git repo.
-
-## Example Use Case: Remote-only Image Append with MergeOp
-If you have some layers already pushed to a remote registry, MergeOp allows you to create new images that combine those layers in arbitrary ways without having to actually pull any layers down first. For example:
-```go
-foo := llb.Image("fooApp:v0.1")
-bar := llb.Image("barApp:v0.3")
-qaz := llb.Image("qazApp:v1.2")
-merged := llb.Merge([]llb.State{foo, bar, qaz})
-```
-If `merged` is being exported to the same registry that already has the layers for `fooApp`, `barApp` and `qazApp`, then the only thing Buildkit does during the export is create an image manifest (just some metadata) and push it to the registry. No layers need to be pushed (they are already there) and they don't even need to be pulled locally to Buildkit either.
-
-Note that if you were to instead do this:
-```go
-merged := llb.Merge([]llb.State{foo, bar, qaz}).Run(llb.Shlex("extra command")).Root()
-```
-Then `fooApp`, `barApp` and `qazApp` will need to be pulled, though they will usually be merged together more efficiently than the naive solution of just unpacking the layers on top of each other. See the "Performance Details" section for more info.
-
-Additionally, if you export your Buildkit cache to a registry, this same idea can be extended to any LLB types, not just `llb.Image`. So, using the same example as the previous use case:
-```go
-a := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run("build a").Root(), "/bin/a", "/usr/bin/a"))
-b := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run("build b").Root(), "/bin/b", "/usr/bin/b"))
-c := llb.Scratch().File(llb.Copy(llb.Image("alpine").Run("build c").Root(), "/bin/c", "/usr/bin/c"))
-combined := llb.Merge([]llb.State{
- llb.Image("alpine"),
- a,
- b,
- c,
-})
-```
-
-If you do a build that includes a remote cache export to a registry, then any Buildkit worker importing that cache can run builds that do different merges of those layers without having to pull anything down. For instance, if a separate Buildkit worker imported that remote cache and then built this:
-```go
-combined2 := llb.Merge([]llb.State{
- c,
- a
-})
-```
-An export of `combined2` would not need to pull any layers down because it's just a merge of `c` and `a`, which already have layers in the registry thanks to the remote cache. This works because a remote cache import is actually just a metadata download; layers are only pulled locally once needed and they aren't needed for this MergeOp.
-
-## Example Use Case: Modeling Package Builds with MergeOp+DiffOp
-Merge and Diff have many potential use cases, but one primary one is to assist higher level tooling that's using LLB to model "dependency-based builds", such as what's found in many package managers and other build systems.
-
-More specifically, the following is a common pattern used to model the build of a "package" (or equivalent concept) in such systems:
-1. The build-time dependencies of the package are combined into a filesystem. The dependencies are themselves just already-built packages.
-1. A build is run by executing some commands that have access to the combined dependencies, producing new build artifacts that are somehow isolated from the dependencies. These isolated build artifacts become the new package's contents.
-1. The new package can then be used as a dependency of other packages and/or served directly to end users, while being careful to ensure that any runtime dependencies are also present when the package needs to be utilized.
-
-One way to adapt the above model to LLB might be like this:
-```go
-// "Packages" are just LLB states. Build-time dependencies are combined
-// together into a filesystem using MergeOp.
-runtimeDeps := llb.Merge([]llb.State{depC, depD})
-buildDeps := llb.Merge([]llb.State{src, depA, depB, runtimeDeps})
-
-// Builds of a new package are ExecOps on top of the MergeOp from the previous step
-// (one ExecOp for the build and one for the install). The install ExecOp is defined
-// such that build artifacts are written to a dedicated Mount, isolating them from
-// the dependencies under /output.
-builtPackage := buildDeps.Run(
- llb.Dir("/src"),
- llb.Shlex("make"),
-).Root().Run(
- llb.Dir("/src"),
- llb.Shlex("make install"),
- llb.AddEnv("DESTDIR", "/output"),
- llb.AddMount("/output", llb.Scratch()),
-).GetMount("/output")
-
-// If the package needs to be run as part of a different build or by an
-// end user, the runtime deps of the state can be included via a MergeOp.
-llb.Merge([]llb.State{runtimeDeps, builtPackage})
-```
-
-While the above is a bit of an over-simplification (it, for instance, ignores the need to topologically sort dependency DAGs before merging them together), the important point is that it only needs MergeOp and ExecOp; DiffOp is left out entirely. For many use cases, this is completely fine and DiffOp is not needed.
-
-Some use cases can run into issues though, specifically with the part where build artifacts need to be isolated from their dependencies. The above example uses the convention of setting `DESTDIR`, an environment variable that specifies a directory that `make install` should place artifacts under. Most build systems support either `DESTDIR` or some type of equivalent mechanism for isolating installed build artifacts. However, there are times when this convention is either not available or not desired, in which case DiffOp can come to the rescue as a generic, tool-agnostic way of separating states out from their original dependency base. The modification from the previous example is quite small:
-```go
-// Same `make` command as before
-buildBase := buildDeps.Run(
- llb.Dir("/src"),
- llb.Shlex("make"),
-).Root()
-
-// Now, `make install` doesn't use DESTDIR and just installs directly
-// to the rootfs of the build. The package contents are instead isolated
-// by diffing the rootfs from before and after the install command.
-builtPackage := llb.Diff(buildBase, buildBase.Run(
- llb.Dir("/src"),
- llb.Shlex("make install"),
-).Root())
-```
-
-This approach using DiffOp should achieve the same end result as the previous version but without having to rely on `DESTDIR` support being present in the `make install` step.
-
-The fact that DiffOp is more generic and arguably simpler than setting `DESTDIR` or equivalents doesn't mean it's strictly better for every case. The following should be kept in mind when dealing with use cases where both approaches are viable:
-1. The version that uses `DESTDIR` will likely have *slightly* better performance than the version using DiffOp for many use cases. This is because it's faster for Buildkit to merge in a state that is just a single layer on top of scratch (i.e. the first version of `builtPackage` that used `DESTDIR`) than it is to merge in a state whose diff is between two non-empty states (i.e. the DiffOp version). Whether the performance difference actually matters needs to be evaluated on a case-by-case basis.
-1. DiffOp has some subtle behavior discussed in the "Advanced Details" section that, while irrelevant to most use cases, can occasionally distinguish it from the `DESTDIR` approach.
-
-## Performance Considerations
-### Laziness
-MergeOp and DiffOp are both implemented lazily in that their on-disk filesystem representations will only be created when absolutely necessary.
-
-The most common situation in which a Merge/Diff result will need to be "unlazied" (created on disk) is when it is used as the input to an Exec or File op. For example:
-```go
-rootfs := llb.Merge([]llb.State{A, B})
-extraLayer := rootfs.Run(llb.Shlex("some command")).Root()
-```
-In this case, if `extraLayer` is not already cached, `extraLayer` will need `rootfs` to exist on disk in order to run, so `rootfs` will have to be unlazied. The same idea applies if `extraLayer` was defined as a FileOp or if `rootfs` was defined using a `DiffOp`.
-
-What's perhaps more interesting are cases in which merge/diff results *don't* need to be unlazied. One such situation is when they are exported as a container image. As discussed previously, layers from the inputs of merge/diff are re-used as much as possible during image exports, so that means that the final merged/diffed result is not needed, only the inputs.
-
-Another situation that doesn't require unlazying is when a merge/diff is used as an input to another merge/diff. For example:
-```go
-diff1 := llb.Diff(A, B)
-diff2 := llb.Diff(C, D)
-merge := llb.Merge([]llb.State{diff1, diff2})
-```
-
-In this case, even though `diff1` and `diff2` are used as an input to `merge`, they do not need to be unlazied because `merge` is also lazy. If `A`, `B`, `C` or `D` are lazy LLB states, they also do not need to be unlazied. Laziness is transitive in this respect.
-
-### Snapshotter-dependent Optimizations
-There are some optimizations in the implementation of Merge and Diff op that are relevant to users concerned with scaling large builds involving many different merges and/or diffs. These optimizations are ultimately implementation details though and don't have any impact on the actual contents of merge/diff results.
-
-When a merge or diff result needs to be unlazied, the "universal" fallback implementation that works for all snapshotter backends is to create them by copying files from the inputs as needed into a new filesystem. This works but it can become costly in terms of disk space and CPU time at a certain scale.
-
-However, for two of the default snapshotters (overlay and native), there is an optimization in place to avoid copying files and instead hardlink them from the inputs into the merged/diffed filesystem. This is at least as fast as copying the files and often significantly faster for inputs with large file sizes.
-
-## Advanced Details
-These details are not expected to impact many use cases, but are worth reviewing if you are experiencing surprising behavior while using Merge and Diff op or otherwise want to understand them at a deeper level.
-
-### Layer-like Behavior of Merge and Diff
-One important principal of LLB results is that when they are exported as container images, an external runtime besides Buildkit that pulls and unpacks the image must see the same filesystem that is seen during build time.
-
-That may seem a bit obvious, but it has important implications for Merge and Diff, which are ops that are designed to re-use container layers from their inputs as much as possible in order to maximize cache re-use and efficiency. Many of the more surprising aspects of the behavior discussed in the rest of this doc are a result of needing to ensure that Merge+Diff results look the same before and after export as container layers.
-
-### Deletions
-When either 1) an LLB state deletes a file present in its parent chain or 2) `upper` lacks a path that is present in `lower` while using DiffOp, that deletion is considered an "entity" in the same way that a directory or file is and can have an effect when using that state as a merge input. For example:
-```go
-// create a state that only has /foo
-foo := llb.Scratch().File(llb.Mkfile("/foo", 0644, nil))
-
-// create a state where the file /foo has been removed, leaving nothing
-rmFoo := foo.File(llb.Rm("/foo"))
-
-// create a state containing the file /bar on top of the previous "empty" state
-bar := rmFoo.File(llb.Mkfile("/bar", 0644, nil))
-
-merged := llb.Merge([]llb.State{foo, bar})
-```
-You might assume that `merged` would consist of the files `/foo` and `/bar`, but it will actually just consist of `/bar`. This is because the state `bar` also includes a deletion of the file `/foo` in its chain and thus a part of its definition.
-
-One way of understanding this is that when you merge `foo` and `bar`, you are actually merging the diffs making up each state in the chain that created `foo` and `bar`, i.e.:
-```go
-llb.Merge([]llb.State{foo, bar}) == llb.Merge([]llb.State{
- // foo's chain (only 1 layer)
- llb.Diff(llb.Scratch(), foo), // create /foo
- // bar's chain (3 layers)
- llb.Diff(llb.Scratch(), foo), // create /foo
- llb.Diff(foo, rmFoo), // delete /foo
- llb.Diff(rmFoo, bar), // create /bar
-})
-```
-As you can see, `Diff(foo, rmFoo)` is included there and its only "content" is a deletion of `/foo`. Therefore, when `merged` is being constructed, it will apply that deletion and `/foo` will not exist in the final `merged` result.
-
-Also note that if the order of the merge was reversed to be `Merge([]State{bar, foo})`, then `/foo` will actually exist in `merged` alongside `/bar` because then the contents of `foo` take precedent over the contents of `bar`, and then create of `/foo` therefore "overwrites" the previous deletion of it.
-
-One final detail to note is that even though deletions are entities in the same way files/dirs are, they do not show up when mounted. For example, if you were to mount `llb.Diff(foo, rmFoo)` during a build, you would just see an empty directory. Deletions only have an impact when used as an input to MergeOp.
-
-#### Workarounds
-For use cases that are experiencing this behavior and do not want it, the best option is to find a way to avoid including the problematic deletion in your build definition. This can be very use-case specific, but using the previous example one option might be this:
-```go
-justBar := llb.Diff(rmFoo, bar)
-merged := llb.Merge([]llb.State{foo, justBar})
-```
-Now, `merged` consists of both `/foo` and `/bar` because `justBar` has "diffed out" its parent `rmFoo` and consists only of the final layer that creates `/bar`. Other use cases may require different approaches like changing build commands to avoid unneeded deletions of files and directories.
-
-For use cases that can't avoid the deletion for whatever reason, the fallback option is to use a Copy op to squash the merge input and discard any deletions. So, building off the previous example:
-```go
-squashedBar := llb.Scratch().File(llb.Copy(bar, "/", "/"))
-merged := llb.Merge([]llb.State{foo, squashedBar})
-```
-This results in `merged` consisting of both `/foo` and `/bar`. This is because `squashedBar` is a single layer that only consists of the file+directories that existed in `bar`, not any of its deletions.
-
-Note that there are currently performance tradeoffs to this copy approach in that it will actually result in a copy on disk (i.e. no hardlink optimizations), the copy will not be lazy and `squashedBar` will be a distinct layer from its inputs as far as the Buildkit cache and any remote registries are concerned, which may or may not matter depending on the use-case.
-
-### Diff Corner Cases
-There are some cases where it's ambiguous what the right behavior should be when merging diffs together. As stated before, Merge+Diff resolve these ambiguities by following the same behavior as container image import/export implementations in order to maintain consistency.
-
-One example:
-```go
-dir := llb.Scratch().File(llb.Mkdir("/dir", 0755))
-dirFoo := dir.File(llb.Mkfile("/dir/foo", 0755, nil))
-// rmFoo consists of a delete of /dir/foo
-rmFoo := llb.Diff(dirFoo, dirFoo.File(llb.Rm("/dir/foo")))
-
-// otherdir just consists of /otherdir
-otherdir := llb.Scratch().File(llb.Mkdir("/otherdir", 0755))
-
-// merged consists of /otherdir and /dir (no /dir/foo though)
-merged := llb.Merge([]llb.State{otherdir, rmFoo})
-```
-
-In this case, you start with just `/otherdir` and apply `rmFoo`, which is a deletion of `/dir/foo`. But `/dir/foo` doesn't exist, so it may be reasonable to expect that it just has no effect. However, image import/export code will actually create `/dir` even though it only exists in order to hold an inapplicable delete. As a result, Merge+Diff also have this same behavior.
diff --git a/docs/multi-platform.md b/docs/multi-platform.md
index 8506e885b3ac..73331bfa6846 100644
--- a/docs/multi-platform.md
+++ b/docs/multi-platform.md
@@ -41,3 +41,9 @@ docker run --privileged --rm tonistiigi/binfmt --install all
```
See also [`tonistiigi/binfmt` documentation](https://github.com/tonistiigi/binfmt/).
+
+### Builds are very slow through emulation
+
+Running binaries made for a different architecture through a software emulation layer is much slower than running binaries natively. Therefore, this approach is not recommended for CPU-intensive tasks like compiling binaries. It is provided as a simple solution for building existing Dockerfiles and usually works well for common tasks like installing packages and running scripts. To get native performance for compilation steps, you should modify your Dockerfile to perform cross-compilation using [predefined platform ARGs](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope). Learn more from https://medium.com/@tonistiigi/faster-multi-platform-builds-dockerfile-cross-compilation-guide-part-1-ec087c719eaf . You can also use the [xx](https://github.com/tonistiigi/xx) project to add cross-compilation toolchains into Dockerfiles with minimal changes.
+
+[Docker Buildx](https://github.com/docker/buildx) also supports multi-node builders, where a single image can be built by multiple machines that each build the components for their native architectures.
diff --git a/docs/nydus.md b/docs/nydus.md
new file mode 100644
index 000000000000..709340b55d16
--- /dev/null
+++ b/docs/nydus.md
@@ -0,0 +1,47 @@
+## Nydus image formats
+
+Nydus is an OCI/Docker-compatible accelerated image format provided by the Dragonfly [image-service](https://github.com/dragonflyoss/image-service) project, which offers the ability to pull image data on demand, without waiting for the entire image pull to complete before starting the container. It has been used in production and has shown significant reductions in the time, network, and disk I/O overhead of pulling images and starting containers.
+
+A Nydus image can be flexibly configured as a FUSE-based user-space filesystem or as an in-kernel [EROFS](https://www.kernel.org/doc/html/latest/filesystems/erofs.html) filesystem (from Linux kernel v5.16) with the nydus daemon in user space, which also makes integrating with VM-based container runtimes like [KataContainers](https://katacontainers.io/) much easier.
+
+## Creating Nydus images
+
+### Buildkitd with Nydus Support
+
+To enable buildkit support for Nydus image export, we need to build `buildkitd` with the following command:
+
+```
+go build -tags=nydus -o ./bin/buildkitd ./cmd/buildkitd
+```
+
+### Building Nydus with BuildKit
+
+Download the `nydus-image` binary from the [nydus release page](https://github.com/dragonflyoss/image-service/releases) (v2.1.0 or higher is required), then put the `nydus-image` binary path into `$PATH` or specify it with the `NYDUS_BUILDER` environment variable for buildkitd:
+
+```
+env NYDUS_BUILDER=/path/to/nydus-image buildkitd ...
+```
+
+Note: some nydus intermediate files will be created in the working directory during the build process, which will be cleaned up automatically after the build is completed. Use the `NYDUS_WORKDIR` environment variable to change this working directory.
+
+On the buildctl side, export a nydus image as one of the compression types by specifying the `compression=nydus` option:
+
+```
+buildctl build ... \
+ --output type=image,name=docker.io/username/image,push=true,compression=nydus,oci-mediatypes=true
+```
+
+### Known limitations
+
+- The export of Nydus image and runtime (e.g. [docker](https://github.com/dragonflyoss/image-service/tree/master/contrib/docker-nydus-graphdriver), [containerd](https://github.com/containerd/nydus-snapshotter), etc.) is currently only supported on linux platform.
+- Nydus image layers cannot be mixed with other compression types in the same image, so the `force-compression=true` option is automatically enabled when exporting the Nydus compression type together with other compression types.
+- Specifying a Nydus image as a base image in a Dockerfile is supported, but it does not currently support lazy pulling.
+- Since an exported Nydus image always has one extra metadata layer compared to images in other compression types, Nydus images cannot be exported/imported as cache.
+
+### Other ways to create Nydus images
+
+Pre-converted nydus images are available in the [`ghcr.io/dragonflyoss/image-service` repository](https://github.com/orgs/dragonflyoss/packages?ecosystem=container) (mainly for testing purposes).
+
+The [`Nydusify`](https://github.com/dragonflyoss/image-service/blob/master/docs/nydusify.md) CLI tool pulls an OCIv1 image, converts it into a nydus image, and pushes the nydus image to a registry.
+
+[`Harbor Acceld`](https://github.com/goharbor/acceleration-service) provides a general service for converting OCIv1 images to acceleration image formats such as [Nydus](https://github.com/dragonflyoss/image-service) and [eStargz](https://github.com/containerd/stargz-snapshotter).
diff --git a/docs/rootless.md b/docs/rootless.md
index de41b328b259..ee25875e76ee 100644
--- a/docs/rootless.md
+++ b/docs/rootless.md
@@ -4,6 +4,30 @@ Rootless mode allows running BuildKit daemon as a non-root user.
## Distribution-specific hint
Using Ubuntu kernel is recommended.
+### Container-Optimized OS from Google
+Make sure to mount an `emptyDir` volume as shown below:
+```yaml
+spec:
+ containers:
+ - name: buildkitd
+ volumeMounts:
+ # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too,
+ # but the default VOLUME does not work with rootless on Google's Container-Optimized OS
+ # as it is mounted with `nosuid,nodev`.
+ # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038
+ - mountPath: /home/user/.local/share/buildkit
+ name: buildkitd
+ volumes:
+ - name: buildkitd
+ emptyDir: {}
+```
+
+See also the [example manifests](#Kubernetes).
+
+<details>
+<summary>Old distributions</summary>
+<p>
+
### Debian GNU/Linux 10
Add `kernel.unprivileged_userns_clone=1` to `/etc/sysctl.conf` (or `/etc/sysctl.d`) and run `sudo sysctl -p`.
@@ -16,8 +40,8 @@ This step is not needed for RHEL/CentOS 8 and later.
### Fedora, before kernel 5.13
You may have to disable SELinux, or run BuildKit with `--oci-worker-snapshotter=fuse-overlayfs`.
-### Container-Optimized OS from Google
-:warning: Currently unsupported. See [#879](https://github.com/moby/buildkit/issues/879).
+</p>
+</details>
## Known limitations
* Using the `overlayfs` snapshotter requires kernel >= 5.11 or Ubuntu kernel.
@@ -77,6 +101,9 @@ $ rootlesskit buildkitd --oci-worker-snapshotter=native
### Error related to `newuidmap` or `/etc/subuid`
See https://rootlesscontaine.rs/getting-started/common/subuid/
+### Error `Options:[rbind ro]}]: operation not permitted`
+Make sure to mount an `emptyDir` volume on `/home/user/.local/share/buildkit`.
+
## Containerized deployment
### Kubernetes
diff --git a/docs/solver.md b/docs/solver.md
deleted file mode 100644
index 45b81c5cb078..000000000000
--- a/docs/solver.md
+++ /dev/null
@@ -1,161 +0,0 @@
-## Buildkit solver design
-
-The solver is a component in BuildKit responsible for parsing the build definition and scheduling the operations to the workers for execution.
-
-Solver package is heavily optimized for deduplication of work, concurrent requests, remote and local caching and different per-vertex caching modes. It also allows operations and frontends to call back to itself with new definition that they have generated.
-
-The implementation of the solver is quite complicated, mostly because it is supposed to be performant with snapshot-based storage layer and distribution model using layer tarballs. It is expected that calculating the content based checksum of snapshots between every operation or after every command execution is too slow for common use cases and needs to be postponed to when it is likely to have a meaningful impact. Ideally, the user shouldn't realize that these optimizations are taking place and just get intuitive caching. It is also hoped that if some implementations can provide better cache capabilities, the solver would take advantage of that without requiring significant modification.
-
-In addition to avoiding content checksum scanning the implementation is also designed to make decisions with minimum available data. For example, for remote caching sources to be effective the solver will not require the cache to be loaded or exists for all the vertexes in the graph but will only load it for the final node that is determined to match cache. As another example, if one of the inputs (for example image) can produce a definition based cache match for a vertex, and another (for example local source files) can only produce a content-based(slower) cache match, the solver is designed to detect it and skip content-based check for the first input(that would cause a pull to happen).
-
-### Build definition
-
-The solver takes in a build definition in the form of a content addressable operation definition that forms a graph.
-
-A vertex in this graph is defined by these properties:
-
-```go
-type Vertex interface {
- Digest() digest.Digest
- Options() VertexOptions
- Sys() interface{}
- Inputs() []Edge
- Name() string
-}
-
-type Edge struct {
- Index Index
- Vertex Vertex
-}
-
-type Index int
-```
-
-Every vertex has a content-addressable digest that represents a checksum of the definition graph up to that vertex including all of its inputs. If two vertexes have the same checksum, they are considered identical when they are executing concurrently. That means that if two other vertexes request a vertex with the same digest as an input, they will wait for the same operation to finish.
-
-The vertex digest can only be used for comparison while the solver is running and not between different invocations. For example, if parallel builds require using `docker.io/library/alpine:latest` image as one of the operations, it is pulled only once. But if a build using `docker.io/library/alpine:latest` was built earlier, the checksum based on that name can't be used for finding if the vertex was already built because the image might have changed in the registry and "latest" tag might be pointing to another image.
-
-`Sys()` method returns an object that is used to resolve the executor for the operation. This is how a definition can pass logic to the worker that will execute the task associated with the vertex, without the solver needing to know anything about the implementation. When the solver needs to execute a vertex, it will send this object to a worker, so the worker needs to be configured to understand the object returned by `Sys()`. The solver itself doesn't care how the operations are implemented and therefore doesn't define a type for this value. In LLB solver this value would be with type `llb.Op`.
-
-`Inputs()` returns an array of other vertexes the current vertex depends on. A vertex may have zero inputs. After an operation has executed, it returns an array of return references. If another operation wants to depend on any of these references they would define an input with that vertex and an index of the reference from the return array(starting from zero). Inputs need to be contained in the `Digest()` of the vertex - two vertexes with different inputs should never have the same digest.
-
-Options contain extra information that can be associated with the vertex but what doesn't change the definition(or equality check) of it. Normally this is either a hint to the solver, for example, to ignore cache when executing. It can also be used for associating messages with the vertex that can be helpful for tracing purposes.
-
-
-### Operation interface
-
-Operation interface is how the solver can evaluate the properties of the actual vertex operation. These methods run on the worker, and their implementation is determined by the value of `vertex.Sys()`. The solver is configured with a "resolve" function that can convert a `vertex.Sys()` into an `Op`.
-
-```go
-// Op is an implementation for running a vertex
-type Op interface {
- // CacheMap returns structure describing how the operation is cached.
- // Currently only roots are allowed to return multiple cache maps per op.
- CacheMap(context.Context, int) (*CacheMap, bool, error)
- // Exec runs an operation given results from previous operations.
- // Note that this is not the process execution but can have any definition.
- Exec(ctx context.Context, inputs []Result) (outputs []Result, err error)
-}
-
-type CacheMap struct {
- // Digest is a base digest for operation that needs to be combined with
- // inputs cache or selectors for dependencies.
- Digest digest.Digest
- Deps []struct {
- // Optional digest that is merged with the cache key of the input
- Selector digest.Digest
- // Optional function that returns a digest for the input based on its
- // return value
- ComputeDigestFunc ResultBasedCacheFunc
- }
-}
-
-type ResultBasedCacheFunc func(context.Context, Result) (digest.Digest, error)
-
-
-// Result is an abstract return value for a solve
-type Result interface {
- ID() string
- Release(context.Context) error
- Sys() interface{}
-}
-```
-
-There are two functions that every operation defines. One describes how to calculate a cache key for a vertex and another how to execute it.
-
-`CacheMap` is a description for calculating the cache key. It contains a digest that is combined with the cache keys of the inputs to determine the stable checksum that can be used to cache the operation result. For the vertexes that don't have inputs(roots), it is important that this digest is a stable secure checksum. For example, in LLB this digest is a manifest digest for container images or a commit SHA for git sources.
-
-`CacheMap` may also define optional selectors or content-based cache functions for its inputs. A selector is combined with the input cache key and useful for describing when different parts of an input are being used, and inputs cache key needs to be customized. Content-based cache function allows computing a new cache key for an input after it has completed. In LLB this is used for calculating cache key based on the checksum of file contents of the input snapshots.
-
-`Exec` executes the operation defined by a vertex by passing in the results of the inputs.
-
-
-### Shared graph
-
-After new build request is sent to the solver, it first loads all the vertexes to the shared graph structure. For status tracking, a job instance needs to be created, and vertexes are loaded through jobs. A job ID is assigned to every vertex. If vertex with the same digest has already been loaded to the shared graph, a new job ID is appended to the existing record. When the job finishes, it removes all of its references from the loaded vertex. The resources are released if no more references remain.
-
-Loading a vertex also creates a progress writer associated with it and sets up the cache sources associated with the specific vertex.
-
-After vertexes have been loaded to the job, it is safe to request a result from an edge pointing to a previously loaded vertex. To do this `build(ctx, Edge) (CachedResult, error)` method is called on the static scheduler instance associated with the solver.
-
-### Scheduler
-
-The scheduler is a component responsible for invoking the individual operations needed to find the result for the graph. While the build definition is defined with vertexes, the scheduler is solving edges. In the case of LLB solver, a result of a solved edge is associated with a snapshot. Usually, to solve an edge, the input edges need to be solved first and this can be done concurrently, but there are many exceptions like edge may be cached but its input might be not, or solving one input might cause a cache hit while solving others would just be wasteful. Scheduler tries do handle all these cases.
-
-The scheduler is implemented as a single threaded non-blocking event loop. The single threaded constraint is for simplicity and might be removed in the future - currently, it is not known if this would have any performance impact. All the events in the scheduler have one fixed sender and receiver. The interface for interacting with the scheduler is to create a "pipe" between a sender and a receiver. One or both sides of the pipe may be an edge instance of the graph. If a pipe is added it to the scheduler and an edge receives an event from the pipe, the scheduler will "unpark" that edge so it can process all the events it had received.
-
-The unpark handler for an edge needs to be non-blocking and execute quickly. The edge will process the data from the incoming events and update its internal state. When calling unpark, the scheduler has already separated out the sender and receiver sides of the pipes that in the code are referred as incoming and outgoing requests. The incoming requests are usually requests to retrieve a result or a cache key from an edge. If it appears that an edge doesn't have enough internal state to satisfy the requests, it can make new pipes and register them with the scheduler. These new pipes are generally of two types: ones asking for some async function to be completed and others that request an input edge to reach a specific state first.
-
-To avoid bugs and deadlocks in this logic, the unpark method needs to follow the following rules. If unpark has finished without completing all incoming requests it needs to create outgoing requests. Similarly, if an incoming request remains pending, at least one outgoing request needs to exist as well. Failing to comply with this rule will cause the scheduler to panic as a precaution to avoid leaks and hiding errors.
-
-### Edge state
-
-During unpark, edge state is incremented until it can fulfill the incoming requests.
-
-An edge can be in the following states: initial, cache-fast, cache-slow, completed. Completed edge contains a reference to the final result, in-progress edge may have zero or more cache keys.
-
-The initial state is the starting state for any edge. If a state has reached a cache-fast state, it means that all the definition based cache key lookups have been performed. Cache-slow means that content-based cache lookup has been performed as well. If possible, the scheduler will avoid looking up the slow keys of inputs if they are unnecessary for solving current edge.
-
-The unpark method is split into four phases. The first phase processes all incoming events (responses from outgoing requests or new incoming requests) that caused the unpark to be called. These contain responses from async functions like calls to get the cachemap, execution result or content-based checksum for an input, or responses from input edges when their state or number of cache keys has changed. All the results are stored in edge's internal state. For the new cache keys, a query is performed to determine if any of them can create potential matches to the current edge.
-
-After that, if any of the updates caused changes to edge's properties, a new state is calculated for the current vertex. In this step, all potential cache keys from inputs can cause new cache keys for the edge to be created and the status of an edge might be updated.
-
-Third, the edge will go over all of its incoming requests, to determine if the current internal state is sufficient for satisfying them all. There are a couple of possibilities how this check may end up. If all requests can be completed and there are no outgoing requests the requests finish and unpark method returns. If there are outgoing requests but the edge has reached the completed state or all incoming requests have been canceled, the outgoing requests are canceled. This is an async operation as well and will cause unpark to be called again after completion. If this condition didn't apply but requests could be completed and there are outgoing requests, then the incoming request is answered but not completed. The receiver can then decide to cancel this request if needed. If no new data has appeared to answer the incoming requests, the desired state for an edge is determined for an edge from the incoming requests, and we continue to the next step.
-
-The fourth step sets up outgoing requests based on the desired state determined in the third step. If the current state requires calling any async functions to move forward then it is done here. We will also loop through all the inputs to determine if it is important to raise their desired state. Depending on what inputs can produce content based cache keys and what inputs have already returned possible cache matches, the desired state for inputs may be raised at different times.
-
-When an edge needs to resolve an operation to call the async `CacheMap` and `Exec` methods, it does so by calling back to the shared graph. This makes sure that two different edges pointing to the same vertex do not execute twice. The result values for the operation that is shared by the edges is also cached until the vertex is cleaned up. Progress reporting is also handled and forwarded to the job through this shared vertex instance.
-
-Edge state is cleaned up when a final job that loaded the vertexes that they are connected to is discarded.
-
-
-### Cache providers
-
-Cache providers determine if there is a result that matches the cache keys generated during the build that could be reused instead of fully reevaluating the vertex and its inputs. There can be multiple cache providers, and specific providers can be defined per vertex using the vertex options.
-
-There are multiple backend implementations for cache providers, in-memory one used in unit tests, the default local one using bbolt and one based on cache manifests in a remote registry.
-
-Simplified cache provider has following methods:
-
-```go
-Query(...) ([]*CacheKey, error)
-Records(ck *CacheKey) ([]*CacheRecord, error)
-Load(ctx context.Context, rec *CacheRecord) (Result, error)
-Save(key *CacheKey, s Result) (*ExportableCacheKey, error)
-```
-
-Query method is used to determine if there exist a possible cache link between the input and a vertex. It takes parameters provided by `op.CacheMap` and cache keys returned by the calling the same method on its inputs.
-
-If a cache key has been found, the matching records can be asked for them. A cache key can have zero or more records. Having a record means that a cached result can be loaded for a specific vertex. The solver supports partial cache chains, meaning that not all inputs need to have a cache record to match cache for a vertex.
-
-Load method is used to load a specific record into a result reference. This value is the same type as the one returned by the `op.Exec` method.
-
-Save allows adding more records to the cache.
-
-### Merging edges
-
-One final piece of solver logic allows merging two edges into one when they have both returned the same cache key. In practice, this appears for example when a build uses image references `alpine:latest` and `alpine@sha256:abcabc` in its definition and they actually point to the same image. Another case where this appears is when same source files from different sources are being used as part of the build.
-
-After scheduler has called `unpark()` on an edge it checks it the method added any new cache keys to its state. If it did it will check its internal index if another active edge already exists with the same cache key. If it does it performs some basic validation, for example checking that the new edge has not explicitly asked cache to be ignored, and if it passes, merges the states of two edges.
-
-In the result of the merge, the edge that was checked is deleted, its ongoing requests are canceled and the incoming ones are added to the original edge.
\ No newline at end of file
diff --git a/examples/README.md b/examples/README.md
index e43ccda44654..0e0c13c17410 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -34,7 +34,7 @@ Different versions of the example scripts show different ways of describing the
- `./buildkit1` - cloning git repositories has been separated for extra concurrency.
- `./buildkit2` - uses git sources directly instead of running `git clone`, allowing better performance and much safer caching.
- `./buildkit3` - allows using local source files for separate components eg. `./buildkit3 --runc=local | buildctl build --local runc-src=some/local/path`
-- `./buildkit4` - uses MergeOp to optimize copy chains for better caching behavior (see `docs/merge+diff.md` for more details)
+- `./buildkit4` - uses MergeOp to optimize copy chains for better caching behavior (see `docs/dev/merge-diff.md` for more details)
- `./dockerfile2llb` - can be used to convert a Dockerfile to LLB for debugging purposes
- `./nested-llb` - shows how to use nested invocation to generate LLB
- `./gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies
diff --git a/examples/build-using-dockerfile/README.md b/examples/build-using-dockerfile/README.md
index b0087ad9c903..1ec17bdf198b 100644
--- a/examples/build-using-dockerfile/README.md
+++ b/examples/build-using-dockerfile/README.md
@@ -1,6 +1,5 @@
# `build-using-dockerfile` example
-:information_source: [BuildKit has been integrated to `docker build` since Docker 18.06.](https://docs.docker.com/develop/develop-images/build_enhancements/)
The `build-using-dockerfile` CLI is just provided as an example for writing a BuildKit client application.
For people familiar with `docker build` command, `build-using-dockerfile` is provided as an example for building Dockerfiles with BuildKit using a syntax similar to `docker build`.
diff --git a/examples/buildctl-daemonless/buildctl-daemonless.sh b/examples/buildctl-daemonless/buildctl-daemonless.sh
index 15885dadb783..ab181d16c7e2 100755
--- a/examples/buildctl-daemonless/buildctl-daemonless.sh
+++ b/examples/buildctl-daemonless/buildctl-daemonless.sh
@@ -19,7 +19,7 @@ set -eu
# * addr
# * log
tmp=$(mktemp -d /tmp/buildctl-daemonless.XXXXXX)
-trap "kill \$(cat $tmp/pid); wait \$(cat $tmp/pid) || true; rm -rf $tmp" EXIT
+trap "kill \$(cat $tmp/pid) || true; wait \$(cat $tmp/pid) || true; rm -rf $tmp" EXIT
startBuildkitd() {
addr=
diff --git a/examples/buildkit0/buildkit.go b/examples/buildkit0/buildkit.go
index 78f988d8584d..24810e730ff5 100644
--- a/examples/buildkit0/buildkit.go
+++ b/examples/buildkit0/buildkit.go
@@ -33,7 +33,7 @@ func main() {
}
func goBuildBase() llb.State {
- goAlpine := llb.Image("docker.io/library/golang:1.17-alpine")
+ goAlpine := llb.Image("docker.io/library/golang:1.19-alpine")
return goAlpine.
AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix).
AddEnv("GOPATH", "/go").
diff --git a/examples/buildkit1/buildkit.go b/examples/buildkit1/buildkit.go
index 98793d0ab6e2..9f8201b2bddf 100644
--- a/examples/buildkit1/buildkit.go
+++ b/examples/buildkit1/buildkit.go
@@ -33,7 +33,7 @@ func main() {
}
func goBuildBase() llb.State {
- goAlpine := llb.Image("docker.io/library/golang:1.17-alpine")
+ goAlpine := llb.Image("docker.io/library/golang:1.19-alpine")
return goAlpine.
AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix).
AddEnv("GOPATH", "/go").
diff --git a/examples/buildkit2/buildkit.go b/examples/buildkit2/buildkit.go
index 7a88562cf72e..5ae6b201678b 100644
--- a/examples/buildkit2/buildkit.go
+++ b/examples/buildkit2/buildkit.go
@@ -33,7 +33,7 @@ func main() {
}
func goBuildBase() llb.State {
- goAlpine := llb.Image("docker.io/library/golang:1.17-alpine")
+ goAlpine := llb.Image("docker.io/library/golang:1.19-alpine")
return goAlpine.
AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix).
AddEnv("GOPATH", "/go").
diff --git a/examples/buildkit3/buildkit.go b/examples/buildkit3/buildkit.go
index fa985f18eb84..58de1ebe0666 100644
--- a/examples/buildkit3/buildkit.go
+++ b/examples/buildkit3/buildkit.go
@@ -34,7 +34,7 @@ func main() {
}
func goBuildBase() llb.State {
- goAlpine := llb.Image("docker.io/library/golang:1.17-alpine")
+ goAlpine := llb.Image("docker.io/library/golang:1.19-alpine")
return goAlpine.
AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix).
AddEnv("GOPATH", "/go").
diff --git a/examples/buildkit4/buildkit.go b/examples/buildkit4/buildkit.go
index c20f13ea0c05..0bae9e7899e8 100644
--- a/examples/buildkit4/buildkit.go
+++ b/examples/buildkit4/buildkit.go
@@ -37,7 +37,7 @@ func main() {
}
func goBuildBase() llb.State {
- goAlpine := llb.Image("docker.io/library/golang:1.17-alpine")
+ goAlpine := llb.Image("docker.io/library/golang:1.19-alpine")
return goAlpine.
AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix).
AddEnv("GOPATH", "/go").
diff --git a/examples/dockerfile2llb/main.go b/examples/dockerfile2llb/main.go
index 94455565b1ec..2fd693a4ae80 100644
--- a/examples/dockerfile2llb/main.go
+++ b/examples/dockerfile2llb/main.go
@@ -2,9 +2,9 @@ package main
import (
"context"
+ "encoding/json"
"flag"
- "io/ioutil"
- "log"
+ "io"
"os"
"github.com/moby/buildkit/client/llb"
@@ -12,40 +12,66 @@ import (
"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/appcontext"
+ "github.com/sirupsen/logrus"
)
type buildOpt struct {
- target string
+ target string
+ partialImageConfigFile string
+ partialMetadataFile string
}
func main() {
+ if err := xmain(); err != nil {
+ logrus.Fatal(err)
+ }
+}
+
+func xmain() error {
var opt buildOpt
flag.StringVar(&opt.target, "target", "", "target stage")
+ flag.StringVar(&opt.partialImageConfigFile, "partial-image-config-file", "", "Output partial image config as a JSON file")
+	flag.StringVar(&opt.partialMetadataFile, "partial-metadata-file", "", "Output partial metadata as a JSON file")
flag.Parse()
- df, err := ioutil.ReadAll(os.Stdin)
+ df, err := io.ReadAll(os.Stdin)
if err != nil {
- panic(err)
+ return err
}
caps := pb.Caps.CapSet(pb.Caps.All())
- state, img, bi, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{
+ state, img, _, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{
MetaResolver: imagemetaresolver.Default(),
Target: opt.target,
LLBCaps: &caps,
})
if err != nil {
- log.Printf("err: %+v", err)
- panic(err)
+ return err
}
- _ = img
- _ = bi
-
dt, err := state.Marshal(context.TODO())
if err != nil {
- panic(err)
+ return err
+ }
+ if err := llb.WriteTo(dt, os.Stdout); err != nil {
+ return err
+ }
+ if opt.partialImageConfigFile != "" {
+ if err := writeJSON(opt.partialImageConfigFile, img); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
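+// writeJSON marshals x to JSON and writes it to the file f, replacing any
+// existing file or directory at that path.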
+func writeJSON(f string, x interface{}) error {
+ b, err := json.Marshal(x)
+ if err != nil {
+ return err
+ }
+ if err := os.RemoveAll(f); err != nil {
+ return err
}
- llb.WriteTo(dt, os.Stdout)
+ return os.WriteFile(f, b, 0o644)
}
diff --git a/examples/kubernetes/README.md b/examples/kubernetes/README.md
index 66a36c102a9b..c8973dc5645d 100644
--- a/examples/kubernetes/README.md
+++ b/examples/kubernetes/README.md
@@ -7,8 +7,7 @@ This directory contains Kubernetes manifests for `Pod`, `Deployment` (with `Serv
* `Job`: good if you don't want to have daemon pods
Using Rootless mode (`*.rootless.yaml`) is recommended because Rootless mode image is executed as non-root user (UID 1000) and doesn't need `securityContext.privileged`.
-
-:warning: Rootless mode may not work on some host kernels. See [`../../docs/rootless.md`](../../docs/rootless.md).
+See [`../../docs/rootless.md`](../../docs/rootless.md).
See also ["Building Images Efficiently And Securely On Kubernetes With BuildKit" (KubeCon EU 2019)](https://kccnceu19.sched.com/event/MPX5).
diff --git a/examples/kubernetes/consistenthash/main.go b/examples/kubernetes/consistenthash/main.go
index ebe5f64f2840..4b100777ce3c 100644
--- a/examples/kubernetes/consistenthash/main.go
+++ b/examples/kubernetes/consistenthash/main.go
@@ -16,7 +16,7 @@ package main
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
"strings"
@@ -38,7 +38,7 @@ func xmain() error {
return errors.New("should not reach here")
}
key := os.Args[1]
- stdin, err := ioutil.ReadAll(os.Stdin)
+ stdin, err := io.ReadAll(os.Stdin)
if err != nil {
return err
}
diff --git a/examples/kubernetes/deployment+service.rootless.yaml b/examples/kubernetes/deployment+service.rootless.yaml
index 00f89f7342f9..0b554096fde6 100644
--- a/examples/kubernetes/deployment+service.rootless.yaml
+++ b/examples/kubernetes/deployment+service.rootless.yaml
@@ -15,7 +15,6 @@ spec:
app: buildkitd
annotations:
container.apparmor.security.beta.kubernetes.io/buildkitd: unconfined
- container.seccomp.security.alpha.kubernetes.io/buildkitd: unconfined
# see buildkit/docs/rootless.md for caveats of rootless mode
spec:
containers:
@@ -52,6 +51,9 @@ spec:
initialDelaySeconds: 5
periodSeconds: 30
securityContext:
+ # Needs Kubernetes >= 1.19
+ seccompProfile:
+ type: Unconfined
# To change UID/GID, you need to rebuild the image
runAsUser: 1000
runAsGroup: 1000
@@ -61,11 +63,19 @@ spec:
- name: certs
readOnly: true
mountPath: /certs
+ # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too,
+ # but the default VOLUME does not work with rootless on Google's Container-Optimized OS
+ # as it is mounted with `nosuid,nodev`.
+ # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038
+ - mountPath: /home/user/.local/share/buildkit
+ name: buildkitd
volumes:
# buildkit-daemon-certs must contain ca.pem, cert.pem, and key.pem
- name: certs
secret:
secretName: buildkit-daemon-certs
+ - name: buildkitd
+ emptyDir: {}
---
apiVersion: v1
kind: Service
diff --git a/examples/kubernetes/job.rootless.yaml b/examples/kubernetes/job.rootless.yaml
index 7c9941d05434..06e608c6ab35 100644
--- a/examples/kubernetes/job.rootless.yaml
+++ b/examples/kubernetes/job.rootless.yaml
@@ -7,7 +7,6 @@ spec:
metadata:
annotations:
container.apparmor.security.beta.kubernetes.io/buildkit: unconfined
- container.seccomp.security.alpha.kubernetes.io/buildkit: unconfined
# see buildkit/docs/rootless.md for caveats of rootless mode
spec:
restartPolicy: Never
@@ -43,6 +42,9 @@ spec:
# To push the image to a registry, add
# `--output type=image,name=docker.io/username/image,push=true`
securityContext:
+ # Needs Kubernetes >= 1.19
+ seccompProfile:
+ type: Unconfined
# To change UID/GID, you need to rebuild the image
runAsUser: 1000
runAsGroup: 1000
@@ -50,8 +52,16 @@ spec:
- name: workspace
readOnly: true
mountPath: /workspace
+ # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too,
+ # but the default VOLUME does not work with rootless on Google's Container-Optimized OS
+ # as it is mounted with `nosuid,nodev`.
+ # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038
+ - mountPath: /home/user/.local/share/buildkit
+ name: buildkitd
# To push the image, you also need to create `~/.docker/config.json` secret
# and set $DOCKER_CONFIG to `/path/to/.docker` directory.
volumes:
- name: workspace
emptyDir: {}
+ - name: buildkitd
+ emptyDir: {}
diff --git a/examples/kubernetes/pod.rootless.yaml b/examples/kubernetes/pod.rootless.yaml
index ea63b35d177a..130ea43633fe 100644
--- a/examples/kubernetes/pod.rootless.yaml
+++ b/examples/kubernetes/pod.rootless.yaml
@@ -4,7 +4,6 @@ metadata:
name: buildkitd
annotations:
container.apparmor.security.beta.kubernetes.io/buildkitd: unconfined
- container.seccomp.security.alpha.kubernetes.io/buildkitd: unconfined
# see buildkit/docs/rootless.md for caveats of rootless mode
spec:
containers:
@@ -29,6 +28,19 @@ spec:
initialDelaySeconds: 5
periodSeconds: 30
securityContext:
+ # Needs Kubernetes >= 1.19
+ seccompProfile:
+ type: Unconfined
# To change UID/GID, you need to rebuild the image
runAsUser: 1000
runAsGroup: 1000
+ volumeMounts:
+ # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too,
+ # but the default VOLUME does not work with rootless on Google's Container-Optimized OS
+ # as it is mounted with `nosuid,nodev`.
+ # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038
+ - mountPath: /home/user/.local/share/buildkit
+ name: buildkitd
+ volumes:
+ - name: buildkitd
+ emptyDir: {}
diff --git a/examples/kubernetes/statefulset.rootless.yaml b/examples/kubernetes/statefulset.rootless.yaml
index e67c5a0bf246..0533d2a1004f 100644
--- a/examples/kubernetes/statefulset.rootless.yaml
+++ b/examples/kubernetes/statefulset.rootless.yaml
@@ -17,7 +17,6 @@ spec:
app: buildkitd
annotations:
container.apparmor.security.beta.kubernetes.io/buildkitd: unconfined
- container.seccomp.security.alpha.kubernetes.io/buildkitd: unconfined
# see buildkit/docs/rootless.md for caveats of rootless mode
spec:
containers:
@@ -42,6 +41,19 @@ spec:
initialDelaySeconds: 5
periodSeconds: 30
securityContext:
+ # Needs Kubernetes >= 1.19
+ seccompProfile:
+ type: Unconfined
# To change UID/GID, you need to rebuild the image
runAsUser: 1000
runAsGroup: 1000
+ volumeMounts:
+ # Dockerfile has `VOLUME /home/user/.local/share/buildkit` by default too,
+ # but the default VOLUME does not work with rootless on Google's Container-Optimized OS
+ # as it is mounted with `nosuid,nodev`.
+ # https://github.com/moby/buildkit/issues/879#issuecomment-1240347038
+ - mountPath: /home/user/.local/share/buildkit
+ name: buildkitd
+ volumes:
+ - name: buildkitd
+ emptyDir: {}
diff --git a/examples/nested-llb/main.go b/examples/nested-llb/main.go
index 74eb58864af1..93f9d41c2dcc 100644
--- a/examples/nested-llb/main.go
+++ b/examples/nested-llb/main.go
@@ -32,7 +32,7 @@ func main() {
}
func goBuildBase() llb.State {
- goAlpine := llb.Image("docker.io/library/golang:1.17-alpine")
+ goAlpine := llb.Image("docker.io/library/golang:1.19-alpine")
return goAlpine.
AddEnv("PATH", "/usr/local/go/bin:"+system.DefaultPathEnvUnix).
AddEnv("GOPATH", "/go").
diff --git a/executor/containerdexecutor/executor.go b/executor/containerdexecutor/executor.go
index 43a05cccefe9..ac195c431588 100644
--- a/executor/containerdexecutor/executor.go
+++ b/executor/containerdexecutor/executor.go
@@ -3,7 +3,6 @@ package containerdexecutor
import (
"context"
"io"
- "io/ioutil"
"os"
"path/filepath"
"sync"
@@ -41,12 +40,26 @@ type containerdExecutor struct {
running map[string]chan error
mu sync.Mutex
apparmorProfile string
+ selinux bool
traceSocket string
rootless bool
}
+// OnCreateRuntimer provides an alternative to OCI hooks for applying network
+// configuration to a container. If the [network.Provider] returns a
+// [network.Namespace] which also implements this interface, the containerd
+// executor will run the callback at the appropriate point in the container
+// lifecycle.
+type OnCreateRuntimer interface {
+ // OnCreateRuntime is analogous to the createRuntime OCI hook. The
+ // function is called after the container is created, before the user
+ // process has been executed. The argument is the container PID in the
+ // runtime namespace.
+ OnCreateRuntime(pid uint32) error
+}
+
// New creates a new executor backed by connection to containerd API
-func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, traceSocket string, rootless bool) executor.Executor {
+func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, selinux bool, traceSocket string, rootless bool) executor.Executor {
// clean up old hosts/resolv.conf file. ignore errors
os.RemoveAll(filepath.Join(root, "hosts"))
os.RemoveAll(filepath.Join(root, "resolv.conf"))
@@ -59,6 +72,7 @@ func New(client *containerd.Client, root, cgroup string, networkProviders map[pb
dnsConfig: dnsConfig,
running: make(map[string]chan error),
apparmorProfile: apparmorProfile,
+ selinux: selinux,
traceSocket: traceSocket,
rootless: rootless,
}
@@ -121,7 +135,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
return err
}
defer lm.Unmount()
- defer executor.MountStubsCleaner(rootfsPath, mounts)()
+ defer executor.MountStubsCleaner(rootfsPath, mounts, meta.RemoveMountStubsRecursive)()
uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User)
if err != nil {
@@ -147,7 +161,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
if !ok {
return errors.Errorf("unknown network mode %s", meta.NetMode)
}
- namespace, err := provider.New()
+ namespace, err := provider.New(ctx, meta.Hostname)
if err != nil {
return err
}
@@ -163,7 +177,7 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
}
processMode := oci.ProcessSandbox // FIXME(AkihiroSuda)
- spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.traceSocket, opts...)
+ spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.selinux, w.traceSocket, opts...)
if err != nil {
return err
}
@@ -204,11 +218,17 @@ func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.M
}
defer func() {
- if _, err1 := task.Delete(context.TODO()); err == nil && err1 != nil {
+ if _, err1 := task.Delete(context.TODO(), containerd.WithProcessKill); err == nil && err1 != nil {
err = errors.Wrapf(err1, "failed to delete task %s", id)
}
}()
+ if nn, ok := namespace.(OnCreateRuntimer); ok {
+ if err := nn.OnCreateRuntime(task.Pid()); err != nil {
+ return err
+ }
+ }
+
trace.SpanFromContext(ctx).AddEvent("Container created")
err = w.runProcess(ctx, task, process.Resize, process.Signal, func() {
startedOnce.Do(func() {
@@ -315,10 +335,10 @@ func fixProcessOutput(process *executor.ProcessInfo) {
// failed to start io pipe copy: unable to copy pipes: containerd-shim: opening file "" failed: open : no such file or directory: unknown
// So just stub out any missing output
if process.Stdout == nil {
- process.Stdout = &nopCloser{ioutil.Discard}
+ process.Stdout = &nopCloser{io.Discard}
}
if process.Stderr == nil {
- process.Stderr = &nopCloser{ioutil.Discard}
+ process.Stderr = &nopCloser{io.Discard}
}
}
diff --git a/executor/executor.go b/executor/executor.go
index 4727af4b03ef..a323bcc9cc94 100644
--- a/executor/executor.go
+++ b/executor/executor.go
@@ -23,6 +23,8 @@ type Meta struct {
CgroupParent string
NetMode pb.NetMode
SecurityMode pb.SecurityMode
+
+ RemoveMountStubsRecursive bool
}
type Mountable interface {
diff --git a/executor/oci/hosts.go b/executor/oci/hosts.go
index d0505c28ccd9..0d193555c941 100644
--- a/executor/oci/hosts.go
+++ b/executor/oci/hosts.go
@@ -4,7 +4,6 @@ import (
"bytes"
"context"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
@@ -56,7 +55,7 @@ func makeHostsFile(stateDir string, extraHosts []executor.HostIP, idmap *idtools
}
tmpPath := p + ".tmp"
- if err := ioutil.WriteFile(tmpPath, b.Bytes(), 0644); err != nil {
+ if err := os.WriteFile(tmpPath, b.Bytes(), 0644); err != nil {
return "", nil, err
}
diff --git a/executor/oci/resolvconf.go b/executor/oci/resolvconf.go
index c510a1a1bc18..3ac0feda7aea 100644
--- a/executor/oci/resolvconf.go
+++ b/executor/oci/resolvconf.go
@@ -2,12 +2,10 @@ package oci
import (
"context"
- "io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/libnetwork/resolvconf"
- "github.com/docker/docker/libnetwork/types"
"github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/pkg/errors"
@@ -74,7 +72,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
if dns != nil {
var (
- dnsNameservers = resolvconf.GetNameservers(dt, types.IP)
+ dnsNameservers = resolvconf.GetNameservers(dt, resolvconf.IP)
dnsSearchDomains = resolvconf.GetSearchDomains(dt)
dnsOptions = resolvconf.GetOptions(dt)
)
@@ -101,7 +99,7 @@ func GetResolvConf(ctx context.Context, stateDir string, idmap *idtools.Identity
}
tmpPath := p + ".tmp"
- if err := ioutil.WriteFile(tmpPath, f.Content, 0644); err != nil {
+ if err := os.WriteFile(tmpPath, f.Content, 0644); err != nil {
return "", err
}
diff --git a/executor/oci/resolvconf_test.go b/executor/oci/resolvconf_test.go
index 316bcefda283..ec073885eb34 100644
--- a/executor/oci/resolvconf_test.go
+++ b/executor/oci/resolvconf_test.go
@@ -2,7 +2,6 @@ package oci
import (
"context"
- "io/ioutil"
"os"
"testing"
@@ -27,13 +26,10 @@ nameserver 8.8.4.4
nameserver 2001:4860:4860::8888
nameserver 2001:4860:4860::8844`
- dir, err := ioutil.TempDir("", "buildkit-test")
- require.NoError(t, err)
- defer os.RemoveAll(dir)
ctx := context.Background()
- p, err := GetResolvConf(ctx, dir, nil, nil)
+ p, err := GetResolvConf(ctx, t.TempDir(), nil, nil)
require.NoError(t, err)
- b, err := ioutil.ReadFile(p)
+ b, err := os.ReadFile(p)
require.NoError(t, err)
require.Equal(t, string(b), defaultResolvConf)
}
diff --git a/executor/oci/spec.go b/executor/oci/spec.go
index ea8741995a11..94b48a7aa9ff 100644
--- a/executor/oci/spec.go
+++ b/executor/oci/spec.go
@@ -50,7 +50,7 @@ func (pm ProcessMode) String() string {
// GenerateSpec generates spec using containerd functionality.
// opts are ignored for s.Process, s.Hostname, and s.Mounts .
-func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, cgroupParent string, processMode ProcessMode, idmap *idtools.IdentityMapping, apparmorProfile string, tracingSocket string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
+func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, cgroupParent string, processMode ProcessMode, idmap *idtools.IdentityMapping, apparmorProfile string, selinuxB bool, tracingSocket string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) {
c := &containers.Container{
ID: id,
}
@@ -81,7 +81,7 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou
return nil, nil, err
}
- if securityOpts, err := generateSecurityOpts(meta.SecurityMode, apparmorProfile); err == nil {
+ if securityOpts, err := generateSecurityOpts(meta.SecurityMode, apparmorProfile, selinuxB); err == nil {
opts = append(opts, securityOpts...)
} else {
return nil, nil, err
diff --git a/executor/oci/spec_unix.go b/executor/oci/spec_unix.go
index 5f4908ca6b6c..f906f79b6bac 100644
--- a/executor/oci/spec_unix.go
+++ b/executor/oci/spec_unix.go
@@ -16,7 +16,9 @@ import (
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/entitlements/security"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
+ "github.com/pkg/errors"
)
func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
@@ -30,7 +32,10 @@ func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
}
// generateSecurityOpts may affect mounts, so must be called after generateMountOpts
-func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) (opts []oci.SpecOpts, _ error) {
+func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) (opts []oci.SpecOpts, _ error) {
+ if selinuxB && !selinux.GetEnabled() {
+ return nil, errors.New("selinux is not available")
+ }
switch mode {
case pb.SecurityMode_INSECURE:
return []oci.SpecOpts{
@@ -39,7 +44,9 @@ func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) (opts []
oci.WithWriteableSysfs,
func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
var err error
- s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels([]string{"disable"})
+ if selinuxB {
+ s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels([]string{"disable"})
+ }
return err
},
}, nil
@@ -52,7 +59,9 @@ func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) (opts []
}
opts = append(opts, func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
var err error
- s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels(nil)
+ if selinuxB {
+ s.Process.SelinuxLabel, s.Linux.MountLabel, err = label.InitLabels(nil)
+ }
return err
})
return opts, nil
@@ -77,7 +86,7 @@ func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) {
return nil, nil
}
return []oci.SpecOpts{
- oci.WithUserNamespace(specMapping(idmap.UIDs()), specMapping(idmap.GIDs())),
+ oci.WithUserNamespace(specMapping(idmap.UIDMaps), specMapping(idmap.GIDMaps)),
}, nil
}
diff --git a/executor/oci/spec_windows.go b/executor/oci/spec_windows.go
index bc1a6261e284..48b0969e3922 100644
--- a/executor/oci/spec_windows.go
+++ b/executor/oci/spec_windows.go
@@ -15,7 +15,7 @@ func generateMountOpts(resolvConf, hostsFile string) ([]oci.SpecOpts, error) {
}
// generateSecurityOpts may affect mounts, so must be called after generateMountOpts
-func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) ([]oci.SpecOpts, error) {
+func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string, selinuxB bool) ([]oci.SpecOpts, error) {
if mode == pb.SecurityMode_INSECURE {
return nil, errors.New("no support for running in insecure mode on Windows")
}
diff --git a/executor/oci/user.go b/executor/oci/user.go
index eb459f391fbe..bb58e834f634 100644
--- a/executor/oci/user.go
+++ b/executor/oci/user.go
@@ -91,6 +91,7 @@ func parseUID(str string) (uint32, error) {
// once the PR in containerd is merged we should remove this function.
func WithUIDGID(uid, gid uint32, sgids []uint32) containerdoci.SpecOpts {
return func(_ context.Context, _ containerdoci.Client, _ *containers.Container, s *containerdoci.Spec) error {
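+		// Deferred so that the primary GID assigned below is also reflected
+		// in the additional GIDs list before the spec is used.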
+ defer ensureAdditionalGids(s)
setProcess(s)
s.Process.User.UID = uid
s.Process.User.GID = gid
@@ -106,3 +107,15 @@ func setProcess(s *containerdoci.Spec) {
s.Process = &specs.Process{}
}
}
+
+// ensureAdditionalGids ensures that the primary GID is also included in the additional GID list.
+// From https://github.com/containerd/containerd/blob/v1.7.0-beta.4/oci/spec_opts.go#L124-L133
+func ensureAdditionalGids(s *containerdoci.Spec) {
+ setProcess(s)
+ for _, f := range s.Process.User.AdditionalGids {
+ if f == s.Process.User.GID {
+ return
+ }
+ }
+ s.Process.User.AdditionalGids = append([]uint32{s.Process.User.GID}, s.Process.User.AdditionalGids...)
+}
diff --git a/executor/runcexecutor/executor.go b/executor/runcexecutor/executor.go
index 702d513102b2..213ebb73665a 100644
--- a/executor/runcexecutor/executor.go
+++ b/executor/runcexecutor/executor.go
@@ -48,6 +48,7 @@ type Opt struct {
DNS *oci.DNSConfig
OOMScoreAdj *int
ApparmorProfile string
+ SELinux bool
TracingSocket string
}
@@ -67,6 +68,7 @@ type runcExecutor struct {
running map[string]chan error
mu sync.Mutex
apparmorProfile string
+ selinux bool
tracingSocket string
}
@@ -131,6 +133,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex
oomScoreAdj: opt.OOMScoreAdj,
running: make(map[string]chan error),
apparmorProfile: opt.ApparmorProfile,
+ selinux: opt.SELinux,
tracingSocket: opt.TracingSocket,
}
return w, nil
@@ -161,7 +164,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
if !ok {
return errors.Errorf("unknown network mode %s", meta.NetMode)
}
- namespace, err := provider.New()
+ namespace, err := provider.New(ctx, meta.Hostname)
if err != nil {
return err
}
@@ -221,7 +224,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
}
defer mount.Unmount(rootFSPath, 0)
- defer executor.MountStubsCleaner(rootFSPath, mounts)()
+ defer executor.MountStubsCleaner(rootFSPath, mounts, meta.RemoveMountStubsRecursive)()
uid, gid, sgids, err := oci.GetUser(rootFSPath, meta.User)
if err != nil {
@@ -251,7 +254,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount,
}
}
- spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.tracingSocket, opts...)
+ spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.selinux, w.tracingSocket, opts...)
if err != nil {
return err
}
diff --git a/executor/stubs.go b/executor/stubs.go
index 2c13b13053a4..22a8ac1310c4 100644
--- a/executor/stubs.go
+++ b/executor/stubs.go
@@ -7,9 +7,11 @@ import (
"syscall"
"github.com/containerd/continuity/fs"
+ "github.com/moby/buildkit/util/system"
+ "github.com/sirupsen/logrus"
)
-func MountStubsCleaner(dir string, mounts []Mount) func() {
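+// MountStubsCleaner returns a cleanup function that removes the stub files
+// and directories (e.g. /etc/resolv.conf, /etc/hosts and mount targets) that
+// did not exist before the mounts were set up and are still empty afterwards.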
+func MountStubsCleaner(dir string, mounts []Mount, recursive bool) func() {
names := []string{"/etc/resolv.conf", "/etc/hosts"}
for _, m := range mounts {
@@ -28,9 +30,22 @@ func MountStubsCleaner(dir string, mounts []Mount) func() {
continue
}
- _, err = os.Lstat(realPath)
- if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
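+			// Record the stub path if it does not exist yet and, when recursive
+			// cleanup is requested, also record every missing parent directory
+			// so that all of them can be removed after the build.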
+ for {
+ _, err = os.Lstat(realPath)
+ if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR)) {
+ break
+ }
paths = append(paths, realPath)
+
+ if !recursive {
+ break
+ }
+
+ realPathNext := filepath.Dir(realPath)
+ if realPath == realPathNext {
+ break
+ }
+ realPath = realPathNext
}
}
@@ -40,10 +55,41 @@ func MountStubsCleaner(dir string, mounts []Mount) func() {
if err != nil {
continue
}
- if st.Size() != 0 {
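+		// Remove the stub only if it is still an empty directory or an
+		// empty file; anything the build wrote into it is preserved.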
+ if st.IsDir() {
+ entries, err := os.ReadDir(p)
+ if err != nil {
+ continue
+ }
+ if len(entries) != 0 {
+ continue
+ }
+ } else if st.Size() != 0 {
continue
}
- os.Remove(p)
+
+ // Back up the timestamps of the dir for reproducible builds
+ // https://github.com/moby/buildkit/issues/3148
+ dir := filepath.Dir(p)
+ dirSt, err := os.Stat(dir)
+ if err != nil {
+ logrus.WithError(err).Warnf("Failed to stat %q (parent of mount stub %q)", dir, p)
+ continue
+ }
+ mtime := dirSt.ModTime()
+ atime, err := system.Atime(dirSt)
+ if err != nil {
+ logrus.WithError(err).Warnf("Failed to stat atime of %q (parent of mount stub %q)", dir, p)
+ atime = mtime
+ }
+
+ if err := os.Remove(p); err != nil {
+ logrus.WithError(err).Warnf("Failed to remove mount stub %q", p)
+ }
+
+ // Restore the timestamps of the dir
+ if err := os.Chtimes(dir, atime, mtime); err != nil {
+			logrus.WithError(err).Warnf("Failed to restore the timestamps of the mount stub parent directory (os.Chtimes(%q, %v, %v))", dir, atime, mtime)
+ }
}
}
}
diff --git a/exporter/attestation/filter.go b/exporter/attestation/filter.go
new file mode 100644
index 000000000000..5abc234b875e
--- /dev/null
+++ b/exporter/attestation/filter.go
@@ -0,0 +1,45 @@
+package attestation
+
+import (
+ "bytes"
+
+ "github.com/moby/buildkit/exporter"
+)
+
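+// Filter returns the attestations whose metadata contains every key/value
+// pair in include and none of the key/value pairs in exclude. If both maps
+// are empty, attestations is returned unmodified.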
+func Filter(attestations []exporter.Attestation, include map[string][]byte, exclude map[string][]byte) []exporter.Attestation {
+ if len(include) == 0 && len(exclude) == 0 {
+ return attestations
+ }
+
+ result := []exporter.Attestation{}
+ for _, att := range attestations {
+ meta := att.Metadata
+ if meta == nil {
+ meta = map[string][]byte{}
+ }
+
+ match := true
+ for k, v := range include {
+ if !bytes.Equal(meta[k], v) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ for k, v := range exclude {
+ if bytes.Equal(meta[k], v) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ result = append(result, att)
+ }
+ return result
+}
diff --git a/exporter/attestation/make.go b/exporter/attestation/make.go
new file mode 100644
index 000000000000..8ed910c1e8d3
--- /dev/null
+++ b/exporter/attestation/make.go
@@ -0,0 +1,138 @@
+package attestation
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+
+ "github.com/containerd/continuity/fs"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
+ "github.com/moby/buildkit/exporter"
+ gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/snapshot"
+ "github.com/moby/buildkit/solver/result"
+ "github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
+)
+
+// ReadAll reads the content of an attestation.
+func ReadAll(ctx context.Context, s session.Group, att exporter.Attestation) ([]byte, error) {
+ var content []byte
+ if att.ContentFunc != nil {
+ data, err := att.ContentFunc()
+ if err != nil {
+ return nil, err
+ }
+ content = data
+ } else if att.Ref != nil {
+ mount, err := att.Ref.Mount(ctx, true, s)
+ if err != nil {
+ return nil, err
+ }
+ lm := snapshot.LocalMounter(mount)
+ src, err := lm.Mount()
+ if err != nil {
+ return nil, err
+ }
+ defer lm.Unmount()
+
+ p, err := fs.RootPath(src, att.Path)
+ if err != nil {
+ return nil, err
+ }
+ content, err = os.ReadFile(p)
+ if err != nil {
+ return nil, errors.Wrap(err, "cannot read in-toto attestation")
+ }
+ } else {
+ return nil, errors.New("no available content for attestation")
+ }
+ if len(content) == 0 {
+ content = nil
+ }
+ return content, nil
+}
+
+// MakeInTotoStatements iterates over all provided result attestations and
+// generates intoto attestation statements.
+func MakeInTotoStatements(ctx context.Context, s session.Group, attestations []exporter.Attestation, defaultSubjects []intoto.Subject) ([]intoto.Statement, error) {
+ eg, ctx := errgroup.WithContext(ctx)
+ statements := make([]intoto.Statement, len(attestations))
+
+ for i, att := range attestations {
+ i, att := i, att
+ eg.Go(func() error {
+ content, err := ReadAll(ctx, s, att)
+ if err != nil {
+ return err
+ }
+
+ switch att.Kind {
+ case gatewaypb.AttestationKindInToto:
+ stmt, err := makeInTotoStatement(ctx, content, att, defaultSubjects)
+ if err != nil {
+ return err
+ }
+ statements[i] = *stmt
+ case gatewaypb.AttestationKindBundle:
+ return errors.New("bundle attestation kind must be un-bundled first")
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+ return statements, nil
+}
+
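+// makeInTotoStatement wraps the raw attestation content into an in-toto
+// statement. Subjects of kind "self" are expanded to the caller-provided
+// default subjects, while "raw" subjects are converted directly.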
+func makeInTotoStatement(ctx context.Context, content []byte, attestation exporter.Attestation, defaultSubjects []intoto.Subject) (*intoto.Statement, error) {
+ if len(attestation.InToto.Subjects) == 0 {
+ attestation.InToto.Subjects = []result.InTotoSubject{{
+ Kind: gatewaypb.InTotoSubjectKindSelf,
+ }}
+ }
+ subjects := []intoto.Subject{}
+ for _, subject := range attestation.InToto.Subjects {
+ subjectName := "_"
+ if subject.Name != "" {
+ subjectName = subject.Name
+ }
+
+ switch subject.Kind {
+ case gatewaypb.InTotoSubjectKindSelf:
+ for _, defaultSubject := range defaultSubjects {
+ subjectNames := []string{}
+ subjectNames = append(subjectNames, defaultSubject.Name)
+ if subjectName != "_" {
+ subjectNames = append(subjectNames, subjectName)
+ }
+
+ for _, name := range subjectNames {
+ subjects = append(subjects, intoto.Subject{
+ Name: name,
+ Digest: defaultSubject.Digest,
+ })
+ }
+ }
+ case gatewaypb.InTotoSubjectKindRaw:
+ subjects = append(subjects, intoto.Subject{
+ Name: subjectName,
+ Digest: result.ToDigestMap(subject.Digest...),
+ })
+ default:
+ return nil, errors.Errorf("unknown attestation subject type %T", subject)
+ }
+ }
+
+ stmt := intoto.Statement{
+ StatementHeader: intoto.StatementHeader{
+ Type: intoto.StatementInTotoV01,
+ PredicateType: attestation.InToto.PredicateType,
+ Subject: subjects,
+ },
+ Predicate: json.RawMessage(content),
+ }
+ return &stmt, nil
+}
diff --git a/exporter/attestation/unbundle.go b/exporter/attestation/unbundle.go
new file mode 100644
index 000000000000..a2120d7975e1
--- /dev/null
+++ b/exporter/attestation/unbundle.go
@@ -0,0 +1,192 @@
+package attestation
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "path"
+ "strings"
+
+ "github.com/containerd/continuity/fs"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
+ "github.com/moby/buildkit/exporter"
+ gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/snapshot"
+ "github.com/moby/buildkit/solver/result"
+ "github.com/pkg/errors"
+ "golang.org/x/sync/errgroup"
+)
+
+// Unbundle iterates over all provided result attestations and un-bundles any
+// bundled attestations by loading them from the provided refs map.
+func Unbundle(ctx context.Context, s session.Group, bundled []exporter.Attestation) ([]exporter.Attestation, error) {
+ if err := Validate(bundled); err != nil {
+ return nil, err
+ }
+
+ eg, ctx := errgroup.WithContext(ctx)
+ unbundled := make([][]exporter.Attestation, len(bundled))
+
+ for i, att := range bundled {
+ i, att := i, att
+ eg.Go(func() error {
+ switch att.Kind {
+ case gatewaypb.AttestationKindInToto:
+ if strings.HasPrefix(att.InToto.PredicateType, "https://slsa.dev/provenance/") {
+ if att.ContentFunc == nil {
+ // provenance may only be set buildkit-side using ContentFunc
+ return errors.New("frontend may not set provenance attestations")
+ }
+ }
+ unbundled[i] = append(unbundled[i], att)
+ case gatewaypb.AttestationKindBundle:
+ if att.ContentFunc != nil {
+ return errors.New("attestation bundle cannot have callback")
+ }
+ if att.Ref == nil {
+ return errors.Errorf("no ref provided for attestation bundle")
+ }
+
+ mount, err := att.Ref.Mount(ctx, true, s)
+ if err != nil {
+ return err
+ }
+ lm := snapshot.LocalMounter(mount)
+ src, err := lm.Mount()
+ if err != nil {
+ return err
+ }
+ defer lm.Unmount()
+
+ atts, err := unbundle(ctx, src, att)
+ if err != nil {
+ return err
+ }
+ for _, att := range atts {
+ if strings.HasPrefix(att.InToto.PredicateType, "https://slsa.dev/provenance/") {
+ return errors.New("frontend may not bundle provenance attestations")
+ }
+ }
+ unbundled[i] = append(unbundled[i], atts...)
+ }
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+
+ var joined []exporter.Attestation
+ for _, atts := range unbundled {
+ joined = append(joined, atts...)
+ }
+ joined = sort(joined)
+
+ if err := Validate(joined); err != nil {
+ return nil, err
+ }
+ return joined, nil
+}
+
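+// sort moves the SBOM "core" attestations (those whose path matches the
+// result.AttestationSBOMCore metadata value) ahead of the remaining
+// attestations, preserving their relative order.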
+func sort(atts []exporter.Attestation) []exporter.Attestation {
+ isCore := make([]bool, len(atts))
+ for i, att := range atts {
+ name, ok := att.Metadata[result.AttestationSBOMCore]
+ if !ok {
+ continue
+ }
+ if n, _, _ := strings.Cut(att.Path, "."); n != string(name) {
+ continue
+ }
+ isCore[i] = true
+ }
+
+ result := make([]exporter.Attestation, 0, len(atts))
+ for i, att := range atts {
+ if isCore[i] {
+ result = append(result, att)
+ }
+ }
+ for i, att := range atts {
+ if !isCore[i] {
+ result = append(result, att)
+ }
+ }
+ return result
+}
+
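+// unbundle decodes every file under the bundle path as an in-toto statement and
+// converts each statement into a standalone in-toto attestation carrying its
+// predicate and subjects.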
+func unbundle(ctx context.Context, root string, bundle exporter.Attestation) ([]exporter.Attestation, error) {
+ dir, err := fs.RootPath(root, bundle.Path)
+ if err != nil {
+ return nil, err
+ }
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+
+ var unbundled []exporter.Attestation
+ for _, entry := range entries {
+ p, err := fs.RootPath(dir, entry.Name())
+ if err != nil {
+ return nil, err
+ }
+ f, err := os.Open(p)
+ if err != nil {
+ return nil, err
+ }
+ dec := json.NewDecoder(f)
+ var stmt intoto.Statement
+ if err := dec.Decode(&stmt); err != nil {
+ return nil, errors.Wrap(err, "cannot decode in-toto statement")
+ }
+ if bundle.InToto.PredicateType != "" && stmt.PredicateType != bundle.InToto.PredicateType {
+ return nil, errors.Errorf("bundle entry %s does not match required predicate type %s", stmt.PredicateType, bundle.InToto.PredicateType)
+ }
+
+ predicate, err := json.Marshal(stmt.Predicate)
+ if err != nil {
+ return nil, err
+ }
+
+ subjects := make([]result.InTotoSubject, len(stmt.Subject))
+ for i, subject := range stmt.Subject {
+ subjects[i] = result.InTotoSubject{
+ Kind: gatewaypb.InTotoSubjectKindRaw,
+ Name: subject.Name,
+ Digest: result.FromDigestMap(subject.Digest),
+ }
+ }
+ unbundled = append(unbundled, exporter.Attestation{
+ Kind: gatewaypb.AttestationKindInToto,
+ Metadata: bundle.Metadata,
+ Path: path.Join(bundle.Path, entry.Name()),
+ ContentFunc: func() ([]byte, error) { return predicate, nil },
+ InToto: result.InTotoAttestation{
+ PredicateType: stmt.PredicateType,
+ Subjects: subjects,
+ },
+ })
+ }
+ return unbundled, nil
+}
+
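+// Validate returns an error if a non-bundle attestation has no path, or if an
+// attestation has neither a ref nor a content callback to read its payload from.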
+func Validate(atts []exporter.Attestation) error {
+ for _, att := range atts {
+ if err := validate(att); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func validate(att exporter.Attestation) error {
+ if att.Kind != gatewaypb.AttestationKindBundle && att.Path == "" {
+		return errors.New("attestation does not have a path set")
+ }
+ if att.Ref == nil && att.ContentFunc == nil {
+ return errors.New("attestation does not have available content")
+ }
+ return nil
+}
diff --git a/exporter/containerimage/annotations.go b/exporter/containerimage/annotations.go
new file mode 100644
index 000000000000..cdb5e945096c
--- /dev/null
+++ b/exporter/containerimage/annotations.go
@@ -0,0 +1,139 @@
+package containerimage
+
+import (
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/moby/buildkit/exporter/containerimage/exptypes"
+)
+
+type Annotations struct {
+ Index map[string]string
+ IndexDescriptor map[string]string
+ Manifest map[string]string
+ ManifestDescriptor map[string]string
+}
+
+// AnnotationsGroup is a map of annotations keyed by the reference key
+type AnnotationsGroup map[string]*Annotations
+
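+// ParseAnnotations extracts annotation keys from exporter metadata, grouping
+// them by platform and annotation type, and returns the remaining
+// non-annotation entries unmodified.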
+func ParseAnnotations(data map[string][]byte) (AnnotationsGroup, map[string][]byte, error) {
+ ag := make(AnnotationsGroup)
+ rest := make(map[string][]byte)
+
+ for k, v := range data {
+ a, ok, err := exptypes.ParseAnnotationKey(k)
+ if !ok {
+ rest[k] = v
+ continue
+ }
+ if err != nil {
+ return nil, nil, err
+ }
+
+ p := a.PlatformString()
+
+ if ag[p] == nil {
+ ag[p] = &Annotations{
+ IndexDescriptor: make(map[string]string),
+ Index: make(map[string]string),
+ Manifest: make(map[string]string),
+ ManifestDescriptor: make(map[string]string),
+ }
+ }
+
+ switch a.Type {
+ case exptypes.AnnotationIndex:
+ ag[p].Index[a.Key] = string(v)
+ case exptypes.AnnotationIndexDescriptor:
+ ag[p].IndexDescriptor[a.Key] = string(v)
+ case exptypes.AnnotationManifest:
+ ag[p].Manifest[a.Key] = string(v)
+ case exptypes.AnnotationManifestDescriptor:
+ ag[p].ManifestDescriptor[a.Key] = string(v)
+ default:
+ return nil, nil, errors.Errorf("unrecognized annotation type %s", a.Type)
+ }
+ }
+ return ag, rest, nil
+}
+
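+// Platform flattens the group into the annotations that apply to the given
+// platform: index-level annotations from every entry plus manifest-level
+// annotations registered for the empty key and for the platform itself.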
+func (ag AnnotationsGroup) Platform(p *ocispecs.Platform) *Annotations {
+ res := &Annotations{
+ IndexDescriptor: make(map[string]string),
+ Index: make(map[string]string),
+ Manifest: make(map[string]string),
+ ManifestDescriptor: make(map[string]string),
+ }
+
+ ps := []string{""}
+ if p != nil {
+ ps = append(ps, platforms.Format(*p))
+ }
+
+ for _, a := range ag {
+ for k, v := range a.Index {
+ res.Index[k] = v
+ }
+ for k, v := range a.IndexDescriptor {
+ res.IndexDescriptor[k] = v
+ }
+ }
+ for _, pk := range ps {
+ if _, ok := ag[pk]; !ok {
+ continue
+ }
+
+ for k, v := range ag[pk].Manifest {
+ res.Manifest[k] = v
+ }
+ for k, v := range ag[pk].ManifestDescriptor {
+ res.ManifestDescriptor[k] = v
+ }
+ }
+ return res
+}
+
+func (ag AnnotationsGroup) Merge(other AnnotationsGroup) AnnotationsGroup {
+ if other == nil {
+ return ag
+ }
+ if ag == nil {
+ ag = make(AnnotationsGroup)
+ }
+
+ for k, v := range other {
+ ag[k] = ag[k].merge(v)
+ }
+ return ag
+}
+
+func (a *Annotations) merge(other *Annotations) *Annotations {
+ if other == nil {
+ return a
+ }
+ if a == nil {
+ a = &Annotations{
+ IndexDescriptor: make(map[string]string),
+ Index: make(map[string]string),
+ Manifest: make(map[string]string),
+ ManifestDescriptor: make(map[string]string),
+ }
+ }
+
+ for k, v := range other.Index {
+ a.Index[k] = v
+ }
+ for k, v := range other.IndexDescriptor {
+ a.IndexDescriptor[k] = v
+ }
+ for k, v := range other.Manifest {
+ a.Manifest[k] = v
+ }
+ for k, v := range other.ManifestDescriptor {
+ a.ManifestDescriptor[k] = v
+ }
+
+ return a
+}
diff --git a/exporter/containerimage/attestations.go b/exporter/containerimage/attestations.go
new file mode 100644
index 000000000000..a41c6039f0ba
--- /dev/null
+++ b/exporter/containerimage/attestations.go
@@ -0,0 +1,220 @@
+package containerimage
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io/fs"
+ "path/filepath"
+ "strings"
+
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
+ "github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/exporter"
+ "github.com/moby/buildkit/exporter/attestation"
+ gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/solver"
+ "github.com/moby/buildkit/solver/result"
+ "github.com/moby/buildkit/version"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ spdx_json "github.com/spdx/tools-golang/json"
+ "github.com/spdx/tools-golang/spdx/common"
+ spdx "github.com/spdx/tools-golang/spdx/v2_3"
+)
+
+var intotoPlatform ocispecs.Platform = ocispecs.Platform{
+ Architecture: "unknown",
+ OS: "unknown",
+}
+
+// supplementSBOM modifies SPDX attestations to record which image layer created each file
+func supplementSBOM(ctx context.Context, s session.Group, target cache.ImmutableRef, targetRemote *solver.Remote, att exporter.Attestation) (exporter.Attestation, error) {
+ if target == nil {
+ return att, nil
+ }
+ if att.Kind != gatewaypb.AttestationKindInToto {
+ return att, nil
+ }
+ if att.InToto.PredicateType != intoto.PredicateSPDX {
+ return att, nil
+ }
+ name, ok := att.Metadata[result.AttestationSBOMCore]
+ if !ok {
+ return att, nil
+ }
+ if n, _, _ := strings.Cut(filepath.Base(att.Path), "."); n != string(name) {
+ return att, nil
+ }
+
+ content, err := attestation.ReadAll(ctx, s, att)
+ if err != nil {
+ return att, err
+ }
+
+ doc, err := decodeSPDX(content)
+ if err != nil {
+ // ignore decoding error
+ return att, nil
+ }
+
+ layers, err := newFileLayerFinder(target, targetRemote)
+ if err != nil {
+ return att, err
+ }
+ modifyFile := func(f *spdx.File) error {
+ if f == nil {
+ // Skip over nil entries - this is likely a bug in the SPDX parser,
+ // but we shouldn't accidentally panic if we encounter it.
+ return nil
+ }
+
+ if f.FileComment != "" {
+ // Skip over files that already have a comment - since the data is
+ // unstructured, we can't correctly overwrite this field without
+ // possibly breaking some scanner functionality.
+ return nil
+ }
+
+ _, desc, err := layers.find(ctx, s, f.FileName)
+ if err != nil {
+ if !errors.Is(err, fs.ErrNotExist) {
+ return err
+ }
+ return nil
+ }
+ f.FileComment = fmt.Sprintf("layerID: %s", desc.Digest.String())
+ return nil
+ }
+ for _, f := range doc.Files {
+ if err := modifyFile(f); err != nil {
+ return att, err
+ }
+ }
+ for _, p := range doc.Packages {
+ for _, f := range p.Files {
+ if err := modifyFile(f); err != nil {
+ return att, err
+ }
+ }
+ }
+
+ if doc.CreationInfo == nil {
+ doc.CreationInfo = &spdx.CreationInfo{}
+ }
+ doc.CreationInfo.Creators = append(doc.CreationInfo.Creators, common.Creator{
+ CreatorType: "Tool",
+ Creator: "buildkit-" + version.Version,
+ })
+
+ content, err = encodeSPDX(doc)
+ if err != nil {
+ return att, err
+ }
+
+ return exporter.Attestation{
+ Kind: att.Kind,
+ Path: att.Path,
+ ContentFunc: func() ([]byte, error) { return content, nil },
+ InToto: att.InToto,
+ }, nil
+}
+
+func decodeSPDX(dt []byte) (s *spdx.Document, err error) {
+ doc, err := spdx_json.Load2_3(bytes.NewReader(dt))
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to decode spdx")
+ }
+ if doc == nil {
+ return nil, errors.New("decoding produced empty spdx document")
+ }
+ return doc, nil
+}
+
+func encodeSPDX(s *spdx.Document) (dt []byte, err error) {
+ w := bytes.NewBuffer(nil)
+ err = spdx_json.Save2_3(s, w)
+ if err != nil {
+ return nil, errors.Wrap(err, "unable to encode spdx")
+ }
+ return w.Bytes(), nil
+}
+
+// fileLayerFinder finds the layer that contains a file, with caching to avoid
+// repeated FileList lookups.
+type fileLayerFinder struct {
+ pending []fileLayerEntry
+ cache map[string]fileLayerEntry
+}
+
+type fileLayerEntry struct {
+ ref cache.ImmutableRef
+ desc ocispecs.Descriptor
+}
+
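+// newFileLayerFinder pairs each layer in the target's chain with its remote
+// descriptor so that find can walk the layers from the top down.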
+func newFileLayerFinder(target cache.ImmutableRef, remote *solver.Remote) (fileLayerFinder, error) {
+ chain := target.LayerChain()
+ descs := remote.Descriptors
+ if len(chain) != len(descs) {
+ return fileLayerFinder{}, errors.New("layer chain and descriptor list are not the same length")
+ }
+
+ pending := make([]fileLayerEntry, len(chain))
+ for i, ref := range chain {
+ pending[i] = fileLayerEntry{ref: ref, desc: descs[i]}
+ }
+ return fileLayerFinder{
+ pending: pending,
+ cache: map[string]fileLayerEntry{},
+ }, nil
+}
+
+// find finds the layer that contains the file, returning the ImmutableRef and
+// descriptor for the layer. If the file searched for was deleted, find returns
+// the layer that created the file, not the one that deleted it.
+//
+// find is not concurrency-safe.
+func (c *fileLayerFinder) find(ctx context.Context, s session.Group, filename string) (cache.ImmutableRef, *ocispecs.Descriptor, error) {
+ filename = filepath.Join("/", filename)
+
+ // return immediately if we've already found the layer containing filename
+ if cache, ok := c.cache[filename]; ok {
+ return cache.ref, &cache.desc, nil
+ }
+
+ for len(c.pending) > 0 {
+ // pop the last entry off the pending list (we traverse the layers backwards)
+ pending := c.pending[len(c.pending)-1]
+ files, err := pending.ref.FileList(ctx, s)
+ if err != nil {
+ return nil, nil, err
+ }
+ c.pending = c.pending[:len(c.pending)-1]
+
+ found := false
+ for _, f := range files {
+ f = filepath.Join("/", f)
+
+ if strings.HasPrefix(filepath.Base(f), ".wh.") {
+ // skip whiteout files, we only care about file creations
+ continue
+ }
+
+ // add all files in this layer to the cache
+ if _, ok := c.cache[f]; ok {
+ continue
+ }
+ c.cache[f] = pending
+
+ // if we found the file, return the layer (but finish populating the cache first)
+ if f == filename {
+ found = true
+ }
+ }
+ if found {
+ return pending.ref, &pending.desc, nil
+ }
+ }
+ return nil, nil, fs.ErrNotExist
+}
diff --git a/exporter/containerimage/export.go b/exporter/containerimage/export.go
index 429a3ce6df3b..55eaf3ff5803 100644
--- a/exporter/containerimage/export.go
+++ b/exporter/containerimage/export.go
@@ -14,8 +14,10 @@ import (
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/platforms"
+ "github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/rootfs"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
"github.com/moby/buildkit/cache"
cacheconfig "github.com/moby/buildkit/cache/config"
"github.com/moby/buildkit/exporter"
@@ -25,32 +27,27 @@ import (
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/leaseutil"
+ "github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/push"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
)
const (
- keyImageName = "name"
- keyPush = "push"
- keyPushByDigest = "push-by-digest"
- keyInsecure = "registry.insecure"
- keyUnpack = "unpack"
- keyDanglingPrefix = "dangling-name-prefix"
- keyNameCanonical = "name-canonical"
- keyLayerCompression = "compression"
- keyForceCompression = "force-compression"
- keyCompressionLevel = "compression-level"
- keyBuildInfo = "buildinfo"
- keyBuildInfoAttrs = "buildinfo-attrs"
- ociTypes = "oci-mediatypes"
- // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was
- // already found to use a non-distributable media type.
- // When this option is not set, the exporter will change the media type of the layer to a distributable one.
- preferNondistLayersKey = "prefer-nondist-layers"
+ keyPush = "push"
+ keyPushByDigest = "push-by-digest"
+ keyInsecure = "registry.insecure"
+ keyUnpack = "unpack"
+ keyDanglingPrefix = "dangling-name-prefix"
+ keyNameCanonical = "name-canonical"
+ keyStore = "store"
+
+	// keyUnsafeInternalStoreAllowIncomplete should only be used for tests. This option allows exporting an image to the image
+	// store even when some blobs are missing from the content store. Some integration tests for lazyref behaviour depend on
+	// this option. Ignored when store=false.
+ keyUnsafeInternalStoreAllowIncomplete = "unsafe-internal-store-allow-incomplete"
)
type Opt struct {
@@ -76,16 +73,24 @@ func New(opt Opt) (exporter.Exporter, error) {
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
i := &imageExporterInstance{
- imageExporter: e,
- layerCompression: compression.Default,
- buildInfo: true,
+ imageExporter: e,
+ opts: ImageCommitOpts{
+ RefCfg: cacheconfig.RefConfig{
+ Compression: compression.New(compression.Default),
+ },
+ BuildInfo: true,
+ ForceInlineAttestations: true,
+ },
+ store: true,
+ }
+
+ opt, err := i.opts.Load(opt)
+ if err != nil {
+ return nil, err
}
- var esgz bool
for k, v := range opt {
switch k {
- case keyImageName:
- i.targetName = v
case keyPush:
if v == "" {
i.push = true
@@ -126,85 +131,38 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
}
i.unpack = b
- case ociTypes:
+ case keyStore:
if v == "" {
- i.ociTypes = true
+ i.store = true
continue
}
b, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
}
- i.ociTypes = b
- case keyDanglingPrefix:
- i.danglingPrefix = v
- case keyNameCanonical:
+ i.store = b
+ case keyUnsafeInternalStoreAllowIncomplete:
if v == "" {
- i.nameCanonical = true
+ i.storeAllowIncomplete = true
continue
}
b, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
}
- i.nameCanonical = b
- case keyLayerCompression:
- switch v {
- case "gzip":
- i.layerCompression = compression.Gzip
- case "estargz":
- i.layerCompression = compression.EStargz
- esgz = true
- case "zstd":
- i.layerCompression = compression.Zstd
- case "uncompressed":
- i.layerCompression = compression.Uncompressed
- default:
- return nil, errors.Errorf("unsupported layer compression type: %v", v)
- }
- case keyForceCompression:
- if v == "" {
- i.forceCompression = true
- continue
- }
- b, err := strconv.ParseBool(v)
- if err != nil {
- return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, k)
- }
- i.forceCompression = b
- case keyCompressionLevel:
- ii, err := strconv.ParseInt(v, 10, 64)
- if err != nil {
- return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, k)
- }
- v := int(ii)
- i.compressionLevel = &v
- case keyBuildInfo:
- if v == "" {
- i.buildInfo = true
- continue
- }
- b, err := strconv.ParseBool(v)
- if err != nil {
- return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
- }
- i.buildInfo = b
- case keyBuildInfoAttrs:
+ i.storeAllowIncomplete = b
+ case keyDanglingPrefix:
+ i.danglingPrefix = v
+ case keyNameCanonical:
if v == "" {
- i.buildInfoAttrs = false
+ i.nameCanonical = true
continue
}
b, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
}
- i.buildInfoAttrs = b
- case preferNondistLayersKey:
- b, err := strconv.ParseBool(v)
- if err != nil {
- return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, k)
- }
- i.preferNondistLayers = b
+ i.nameCanonical = b
default:
if i.meta == nil {
i.meta = make(map[string][]byte)
@@ -212,51 +170,32 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
i.meta[k] = []byte(v)
}
}
- if esgz && !i.ociTypes {
- logrus.Warn("forcibly turning on oci-mediatype mode for estargz")
- i.ociTypes = true
- }
return i, nil
}
type imageExporterInstance struct {
*imageExporter
- targetName string
- push bool
- pushByDigest bool
- unpack bool
- insecure bool
- ociTypes bool
- nameCanonical bool
- danglingPrefix string
- layerCompression compression.Type
- forceCompression bool
- compressionLevel *int
- buildInfo bool
- buildInfoAttrs bool
- meta map[string][]byte
- preferNondistLayers bool
+ opts ImageCommitOpts
+ push bool
+ pushByDigest bool
+ unpack bool
+ store bool
+ storeAllowIncomplete bool
+ insecure bool
+ nameCanonical bool
+ danglingPrefix string
+ meta map[string][]byte
}
func (e *imageExporterInstance) Name() string {
return "exporting to image"
}
-func (e *imageExporterInstance) Config() exporter.Config {
- return exporter.Config{
- Compression: e.compression(),
- }
-}
-
-func (e *imageExporterInstance) compression() compression.Config {
- c := compression.New(e.layerCompression).SetForce(e.forceCompression)
- if e.compressionLevel != nil {
- c = c.SetLevel(*e.compressionLevel)
- }
- return c
+func (e *imageExporterInstance) Config() *exporter.Config {
+ return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression)
}
-func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, sessionID string) (map[string]string, error) {
+func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) {
if src.Metadata == nil {
src.Metadata = make(map[string][]byte)
}
@@ -264,39 +203,50 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
src.Metadata[k] = v
}
- ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary)
+ opts := e.opts
+ as, _, err := ParseAnnotations(src.Metadata)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- defer done(context.TODO())
+ opts.Annotations = opts.Annotations.Merge(as)
- refCfg := e.refCfg()
- desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, refCfg, e.buildInfo, e.buildInfoAttrs, sessionID)
+ ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary)
if err != nil {
- return nil, err
+ return nil, nil, err
}
+ defer func() {
+ if descref == nil {
+ done(context.TODO())
+ }
+ }()
+ desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts)
+ if err != nil {
+ return nil, nil, err
+ }
defer func() {
- e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest)
+ if err == nil {
+ descref = NewDescriptorReference(*desc, done)
+ }
}()
resp := make(map[string]string)
- if n, ok := src.Metadata["image.name"]; e.targetName == "*" && ok {
- e.targetName = string(n)
+ if n, ok := src.Metadata["image.name"]; e.opts.ImageName == "*" && ok {
+ e.opts.ImageName = string(n)
}
nameCanonical := e.nameCanonical
- if e.targetName == "" && e.danglingPrefix != "" {
- e.targetName = e.danglingPrefix + "@" + desc.Digest.String()
+ if e.opts.ImageName == "" && e.danglingPrefix != "" {
+ e.opts.ImageName = e.danglingPrefix + "@" + desc.Digest.String()
nameCanonical = false
}
- if e.targetName != "" {
- targetNames := strings.Split(e.targetName, ",")
+ if e.opts.ImageName != "" {
+ targetNames := strings.Split(e.opts.ImageName, ",")
for _, targetName := range targetNames {
- if e.opt.Images != nil {
- tagDone := oneOffProgress(ctx, "naming to "+targetName)
+ if e.opt.Images != nil && e.store {
+ tagDone := progress.OneOff(ctx, "naming to "+targetName)
img := images.Image{
Target: *desc,
CreatedAt: time.Now(),
@@ -309,56 +259,59 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
img.Name = targetName + sfx
if _, err := e.opt.Images.Update(ctx, img); err != nil {
if !errors.Is(err, errdefs.ErrNotFound) {
- return nil, tagDone(err)
+ return nil, nil, tagDone(err)
}
if _, err := e.opt.Images.Create(ctx, img); err != nil {
- return nil, tagDone(err)
+ return nil, nil, tagDone(err)
}
}
}
tagDone(nil)
- if e.unpack {
+ if src.Ref != nil && e.unpack {
if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil {
- return nil, err
- }
- }
- }
- if e.push {
- annotations := map[digest.Digest]map[string]string{}
- mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
- if src.Ref != nil {
- remotes, err := src.Ref.GetRemotes(ctx, false, refCfg, false, session.NewGroup(sessionID))
- if err != nil {
- return nil, err
- }
- remote := remotes[0]
- for _, desc := range remote.Descriptors {
- mprovider.Add(desc.Digest, remote.Provider)
- addAnnotations(annotations, desc)
+ return nil, nil, err
}
}
- if len(src.Refs) > 0 {
- for _, r := range src.Refs {
- remotes, err := r.GetRemotes(ctx, false, refCfg, false, session.NewGroup(sessionID))
+
+ if !e.storeAllowIncomplete {
+ if src.Ref != nil {
+ remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
if err != nil {
- return nil, err
+ return nil, nil, err
}
remote := remotes[0]
- for _, desc := range remote.Descriptors {
- mprovider.Add(desc.Digest, remote.Provider)
- addAnnotations(annotations, desc)
+ if unlazier, ok := remote.Provider.(cache.Unlazier); ok {
+ if err := unlazier.Unlazy(ctx); err != nil {
+ return nil, nil, err
+ }
+ }
+ }
+ if len(src.Refs) > 0 {
+ for _, r := range src.Refs {
+ remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+ if err != nil {
+ return nil, nil, err
+ }
+ remote := remotes[0]
+ if unlazier, ok := remote.Provider.(cache.Unlazier); ok {
+ if err := unlazier.Unlazy(ctx); err != nil {
+ return nil, nil, err
+ }
+ }
}
}
}
-
- if err := push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), desc.Digest, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations); err != nil {
- return nil, err
+ }
+ if e.push {
+ err := e.pushImage(ctx, src, sessionID, targetName, desc.Digest)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "failed to push %v", targetName)
}
}
}
- resp["image.name"] = e.targetName
+ resp["image.name"] = e.opts.ImageName
}
resp[exptypes.ExporterImageDigestKey] = desc.Digest.String()
@@ -369,22 +322,47 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
dtdesc, err := json.Marshal(desc)
if err != nil {
- return nil, err
+ return nil, nil, err
}
resp[exptypes.ExporterImageDescriptorKey] = base64.StdEncoding.EncodeToString(dtdesc)
- return resp, nil
+ return resp, nil, nil
}
-func (e *imageExporterInstance) refCfg() cacheconfig.RefConfig {
- return cacheconfig.RefConfig{
- Compression: e.compression(),
- PreferNonDistributable: e.preferNondistLayers,
+func (e *imageExporterInstance) pushImage(ctx context.Context, src *exporter.Source, sessionID string, targetName string, dgst digest.Digest) error {
+ annotations := map[digest.Digest]map[string]string{}
+ mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
+ if src.Ref != nil {
+ remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+ if err != nil {
+ return err
+ }
+ remote := remotes[0]
+ for _, desc := range remote.Descriptors {
+ mprovider.Add(desc.Digest, remote.Provider)
+ addAnnotations(annotations, desc)
+ }
+ }
+ if len(src.Refs) > 0 {
+ for _, r := range src.Refs {
+ remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
+ if err != nil {
+ return err
+ }
+ remote := remotes[0]
+ for _, desc := range remote.Descriptors {
+ mprovider.Add(desc.Digest, remote.Provider)
+ addAnnotations(annotations, desc)
+ }
+ }
}
+
+ ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto")
+ return push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), dgst, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations)
}
-func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src exporter.Source, s session.Group) (err0 error) {
- unpackDone := oneOffProgress(ctx, "unpacking to "+img.Name)
+func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src *exporter.Source, s session.Group) (err0 error) {
+ unpackDone := progress.OneOff(ctx, "unpacking to "+img.Name)
defer func() {
unpackDone(err0)
}()
@@ -403,14 +381,14 @@ func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Imag
topLayerRef := src.Ref
if len(src.Refs) > 0 {
- if r, ok := src.Refs[platforms.DefaultString()]; ok {
+ if r, ok := src.Refs[defaultPlatform()]; ok {
topLayerRef = r
} else {
- return errors.Errorf("no reference for default platform %s", platforms.DefaultString())
+ return errors.Errorf("no reference for default platform %s", defaultPlatform())
}
}
- remotes, err := topLayerRef.GetRemotes(ctx, true, e.refCfg(), false, s)
+ remotes, err := topLayerRef.GetRemotes(ctx, true, e.opts.RefCfg, false, s)
if err != nil {
return err
}
@@ -482,3 +460,29 @@ func addAnnotations(m map[digest.Digest]map[string]string, desc ocispecs.Descrip
a[k] = v
}
}
+
+func defaultPlatform() string {
+	// Use a normalized platform string to avoid mismatches with platform options, which
+	// are normalized using platforms.Normalize()
+ return platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
+}
+
+func NewDescriptorReference(desc ocispecs.Descriptor, release func(context.Context) error) exporter.DescriptorReference {
+ return &descriptorReference{
+ desc: desc,
+ release: release,
+ }
+}
+
+type descriptorReference struct {
+ desc ocispecs.Descriptor
+ release func(context.Context) error
+}
+
+func (d *descriptorReference) Descriptor() ocispecs.Descriptor {
+ return d.desc
+}
+
+func (d *descriptorReference) Release() error {
+ return d.release(context.TODO())
+}
diff --git a/exporter/containerimage/exptypes/annotations.go b/exporter/containerimage/exptypes/annotations.go
new file mode 100644
index 000000000000..e7697d916ad2
--- /dev/null
+++ b/exporter/containerimage/exptypes/annotations.go
@@ -0,0 +1,115 @@
+package exptypes
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/containerd/containerd/platforms"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+const (
+ AnnotationIndex = "index"
+ AnnotationIndexDescriptor = "index-descriptor"
+ AnnotationManifest = "manifest"
+ AnnotationManifestDescriptor = "manifest-descriptor"
+)
+
+var (
+ keyAnnotationRegexp = regexp.MustCompile(`^annotation(?:-([a-z-]+))?(?:\[([A-Za-z0-9_/-]+)\])?\.(\S+)$`)
+)
+
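+// AnnotationKey identifies a single annotation attribute: its type (index,
+// manifest or descriptor), an optional platform and the annotation name.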
+type AnnotationKey struct {
+ Type string
+ Platform *ocispecs.Platform
+ Key string
+}
+
+func (k AnnotationKey) String() string {
+ prefix := "annotation"
+
+ switch k.Type {
+ case "":
+ case AnnotationManifest, AnnotationManifestDescriptor:
+ prefix += fmt.Sprintf("-%s", k.Type)
+ if p := k.PlatformString(); p != "" {
+ prefix += fmt.Sprintf("[%s]", p)
+ }
+ case AnnotationIndex, AnnotationIndexDescriptor:
+ prefix += "-" + k.Type
+ default:
+ panic("unknown annotation type")
+ }
+
+ return fmt.Sprintf("%s.%s", prefix, k.Key)
+}
+
+func (k AnnotationKey) PlatformString() string {
+ if k.Platform == nil {
+ return ""
+ }
+ return platforms.Format(*k.Platform)
+}
+
+func AnnotationIndexKey(key string) string {
+ return AnnotationKey{
+ Type: AnnotationIndex,
+ Key: key,
+ }.String()
+}
+
+func AnnotationIndexDescriptorKey(key string) string {
+ return AnnotationKey{
+ Type: AnnotationIndexDescriptor,
+ Key: key,
+ }.String()
+}
+
+func AnnotationManifestKey(p *ocispecs.Platform, key string) string {
+ return AnnotationKey{
+ Type: AnnotationManifest,
+ Platform: p,
+ Key: key,
+ }.String()
+}
+
+func AnnotationManifestDescriptorKey(p *ocispecs.Platform, key string) string {
+ return AnnotationKey{
+ Type: AnnotationManifestDescriptor,
+ Platform: p,
+ Key: key,
+ }.String()
+}
+
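+// ParseAnnotationKey parses exporter attributes of the form
+// "annotation-<type>[<platform>].<key>", where type and platform are optional;
+// the returned bool reports whether the input was an annotation key at all.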
+func ParseAnnotationKey(result string) (AnnotationKey, bool, error) {
+ groups := keyAnnotationRegexp.FindStringSubmatch(result)
+ if groups == nil {
+ return AnnotationKey{}, false, nil
+ }
+
+ tp, platform, key := groups[1], groups[2], groups[3]
+ switch tp {
+ case AnnotationIndex, AnnotationIndexDescriptor, AnnotationManifest, AnnotationManifestDescriptor:
+ case "":
+ tp = AnnotationManifest
+ default:
+ return AnnotationKey{}, true, errors.Errorf("unrecognized annotation type %s", tp)
+ }
+
+ var ociPlatform *ocispecs.Platform
+ if platform != "" {
+ p, err := platforms.Parse(platform)
+ if err != nil {
+ return AnnotationKey{}, true, err
+ }
+ ociPlatform = &p
+ }
+
+ annotation := AnnotationKey{
+ Type: tp,
+ Platform: ociPlatform,
+ Key: key,
+ }
+ return annotation, true, nil
+}
diff --git a/exporter/containerimage/exptypes/parse.go b/exporter/containerimage/exptypes/parse.go
new file mode 100644
index 000000000000..f77cd3f52565
--- /dev/null
+++ b/exporter/containerimage/exptypes/parse.go
@@ -0,0 +1,56 @@
+package exptypes
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/containerd/containerd/platforms"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
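+// ParsePlatforms returns the platform mapping recorded in the exporter
+// metadata, falling back to a single platform derived from the image config or
+// the default platform when no mapping is present.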
+func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
+ if platformsBytes, ok := meta[ExporterPlatformsKey]; ok {
+ var ps Platforms
+ if len(platformsBytes) > 0 {
+ if err := json.Unmarshal(platformsBytes, &ps); err != nil {
+ return Platforms{}, errors.Wrapf(err, "failed to parse platforms passed to provenance processor")
+ }
+ }
+ return ps, nil
+ }
+
+ p := platforms.DefaultSpec()
+ if imgConfig, ok := meta[ExporterImageConfigKey]; ok {
+ var img ocispecs.Image
+ err := json.Unmarshal(imgConfig, &img)
+ if err != nil {
+ return Platforms{}, err
+ }
+
+ if img.OS != "" && img.Architecture != "" {
+ p = ocispecs.Platform{
+ Architecture: img.Architecture,
+ OS: img.OS,
+ OSVersion: img.OSVersion,
+ OSFeatures: img.OSFeatures,
+ Variant: img.Variant,
+ }
+ }
+ }
+ p = platforms.Normalize(p)
+ pk := platforms.Format(p)
+ ps := Platforms{
+ Platforms: []Platform{{ID: pk, Platform: p}},
+ }
+ return ps, nil
+}
+
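+// ParseKey returns the metadata value for key, preferring a platform-suffixed
+// variant ("<key>/<platform ID>") over the plain key.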
+func ParseKey(meta map[string][]byte, key string, p Platform) []byte {
+ if v, ok := meta[fmt.Sprintf("%s/%s", key, p.ID)]; ok {
+ return v
+ } else if v, ok := meta[key]; ok {
+ return v
+ }
+ return nil
+}
diff --git a/exporter/containerimage/exptypes/types.go b/exporter/containerimage/exptypes/types.go
index a18d660a5c4a..4531360afa80 100644
--- a/exporter/containerimage/exptypes/types.go
+++ b/exporter/containerimage/exptypes/types.go
@@ -11,10 +11,19 @@ const (
ExporterImageConfigDigestKey = "containerimage.config.digest"
ExporterImageDescriptorKey = "containerimage.descriptor"
ExporterInlineCache = "containerimage.inlinecache"
- ExporterBuildInfo = "containerimage.buildinfo"
+ ExporterBuildInfo = "containerimage.buildinfo" // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md
ExporterPlatformsKey = "refs.platforms"
+ ExporterEpochKey = "source.date.epoch"
)
+// KnownRefMetadataKeys are the subset of exporter keys that can be suffixed by
+// a platform to become platform-specific.
+var KnownRefMetadataKeys = []string{
+ ExporterImageConfigKey,
+ ExporterInlineCache,
+ ExporterBuildInfo,
+}
+
type Platforms struct {
Platforms []Platform
}
diff --git a/exporter/containerimage/image/docker_image.go b/exporter/containerimage/image/docker_image.go
new file mode 100644
index 000000000000..a35d811d55cf
--- /dev/null
+++ b/exporter/containerimage/image/docker_image.go
@@ -0,0 +1,52 @@
+package image
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/strslice"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `json:",omitempty"`
+
+ // Zero means to inherit. Durations are expressed as integer nanoseconds.
+ Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+ Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
+
+ // Retries is the number of consecutive failures needed to consider a container as unhealthy.
+ // Zero means inherit.
+ Retries int `json:",omitempty"`
+}
+
+// ImageConfig is a docker compatible config for an image
+type ImageConfig struct {
+ ocispecs.ImageConfig
+
+ Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+ ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+
+ // NetworkDisabled bool `json:",omitempty"` // Is network disabled
+ // MacAddress string `json:",omitempty"` // Mac Address of the container
+ OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
+ StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
+ Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
+
+// Image is the JSON structure which describes some basic information about the image.
+// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
+type Image struct {
+ ocispecs.Image
+
+ // Config defines the execution parameters which should be used as a base when running a container using the image.
+ Config ImageConfig `json:"config,omitempty"`
+}
diff --git a/exporter/containerimage/opts.go b/exporter/containerimage/opts.go
new file mode 100644
index 000000000000..4948eaad2431
--- /dev/null
+++ b/exporter/containerimage/opts.go
@@ -0,0 +1,161 @@
+package containerimage
+
+import (
+ "strconv"
+ "time"
+
+ cacheconfig "github.com/moby/buildkit/cache/config"
+ "github.com/moby/buildkit/exporter/util/epoch"
+ "github.com/moby/buildkit/util/compression"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ keyImageName = "name"
+ keyLayerCompression = "compression"
+ keyCompressionLevel = "compression-level"
+ keyForceCompression = "force-compression"
+ keyOCITypes = "oci-mediatypes"
+ keyBuildInfo = "buildinfo"
+ keyBuildInfoAttrs = "buildinfo-attrs"
+ keyForceInlineAttestations = "attestation-inline"
+
+ // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was
+ // already found to use a non-distributable media type.
+ // When this option is not set, the exporter will change the media type of the layer to a distributable one.
+ keyPreferNondistLayers = "prefer-nondist-layers"
+)
+
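+// ImageCommitOpts hold the options that control how an image commit is
+// written: target name, layer compression, media types, annotations, source
+// epoch and attestation handling.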
+type ImageCommitOpts struct {
+ ImageName string
+ RefCfg cacheconfig.RefConfig
+ OCITypes bool
+ Annotations AnnotationsGroup
+ Epoch *time.Time
+
+ ForceInlineAttestations bool // force inline attestations to be attached
+
+ BuildInfo bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md
+ BuildInfoAttrs bool // Deprecated: Build information is deprecated: https://github.com/moby/buildkit/blob/master/docs/deprecated.md
+}
+
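+// Load parses the recognized exporter attributes into the commit options and
+// returns the attributes it did not consume.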
+func (c *ImageCommitOpts) Load(opt map[string]string) (map[string]string, error) {
+ rest := make(map[string]string)
+
+ as, optb, err := ParseAnnotations(toBytesMap(opt))
+ if err != nil {
+ return nil, err
+ }
+ opt = toStringMap(optb)
+
+ c.Epoch, opt, err = epoch.ParseExporterAttrs(opt)
+ if err != nil {
+ return nil, err
+ }
+
+ for k, v := range opt {
+ var err error
+ switch k {
+ case keyImageName:
+ c.ImageName = v
+ case keyLayerCompression:
+ c.RefCfg.Compression.Type, err = compression.Parse(v)
+ case keyCompressionLevel:
+ ii, err2 := strconv.ParseInt(v, 10, 64)
+			if err2 != nil {
+ err = errors.Wrapf(err2, "non-int value %s specified for %s", v, k)
+ break
+ }
+ v := int(ii)
+ c.RefCfg.Compression.Level = &v
+ case keyForceCompression:
+ err = parseBoolWithDefault(&c.RefCfg.Compression.Force, k, v, true)
+ case keyOCITypes:
+ err = parseBoolWithDefault(&c.OCITypes, k, v, true)
+ case keyBuildInfo:
+ err = parseBoolWithDefault(&c.BuildInfo, k, v, true)
+ case keyBuildInfoAttrs:
+ err = parseBoolWithDefault(&c.BuildInfoAttrs, k, v, false)
+ case keyForceInlineAttestations:
+ err = parseBool(&c.ForceInlineAttestations, k, v)
+ case keyPreferNondistLayers:
+ err = parseBool(&c.RefCfg.PreferNonDistributable, k, v)
+ default:
+ rest[k] = v
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if c.RefCfg.Compression.Type.OnlySupportOCITypes() {
+ c.EnableOCITypes(c.RefCfg.Compression.Type.String())
+ }
+
+ if c.RefCfg.Compression.Type.NeedsForceCompression() {
+ c.EnableForceCompression(c.RefCfg.Compression.Type.String())
+ }
+
+ c.Annotations = c.Annotations.Merge(as)
+
+ return rest, nil
+}
+
+func (c *ImageCommitOpts) EnableOCITypes(reason string) {
+ if !c.OCITypes {
+ message := "forcibly turning on oci-mediatype mode"
+ if reason != "" {
+ message += " for " + reason
+ }
+ logrus.Warn(message)
+
+ c.OCITypes = true
+ }
+}
+
+func (c *ImageCommitOpts) EnableForceCompression(reason string) {
+ if !c.RefCfg.Compression.Force {
+ message := "forcibly turning on force-compression mode"
+ if reason != "" {
+ message += " for " + reason
+ }
+ logrus.Warn(message)
+
+ c.RefCfg.Compression.Force = true
+ }
+}
+
+func parseBool(dest *bool, key string, value string) error {
+ b, err := strconv.ParseBool(value)
+ if err != nil {
+ return errors.Wrapf(err, "non-bool value specified for %s", key)
+ }
+ *dest = b
+ return nil
+}
+
+func parseBoolWithDefault(dest *bool, key string, value string, defaultValue bool) error {
+ if value == "" {
+ *dest = defaultValue
+ return nil
+ }
+ return parseBool(dest, key, value)
+}
+
+func toBytesMap(m map[string]string) map[string][]byte {
+ result := make(map[string][]byte)
+ for k, v := range m {
+ result[k] = []byte(v)
+ }
+ return result
+}
+
+func toStringMap(m map[string][]byte) map[string]string {
+ result := make(map[string]string)
+ for k, v := range m {
+ result[k] = string(v)
+ }
+ return result
+}
diff --git a/exporter/containerimage/patch.go b/exporter/containerimage/patch.go
new file mode 100644
index 000000000000..93866b018bda
--- /dev/null
+++ b/exporter/containerimage/patch.go
@@ -0,0 +1,18 @@
+//go:build !nydus
+// +build !nydus
+
+package containerimage
+
+import (
+ "context"
+
+ "github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/solver"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func patchImageLayers(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, opts *ImageCommitOpts, sg session.Group) (*solver.Remote, []ocispecs.History, error) {
+ remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes)
+ return remote, history, nil
+}
diff --git a/exporter/containerimage/patch_nydus.go b/exporter/containerimage/patch_nydus.go
new file mode 100644
index 000000000000..3a9336a66f64
--- /dev/null
+++ b/exporter/containerimage/patch_nydus.go
@@ -0,0 +1,35 @@
+//go:build nydus
+// +build nydus
+
+package containerimage
+
+import (
+ "context"
+
+ "github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/solver"
+ "github.com/moby/buildkit/util/compression"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// patchImageLayers appends an extra nydus bootstrap layer
+// to the manifest of a nydus image and normalizes the layers
+// and history. The nydus bootstrap layer represents the
+// metadata of the filesystem view for the entire image.
+func patchImageLayers(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, opts *ImageCommitOpts, sg session.Group) (*solver.Remote, []ocispecs.History, error) {
+ if opts.RefCfg.Compression.Type != compression.Nydus {
+ remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes)
+ return remote, history, nil
+ }
+
+ desc, err := cache.MergeNydus(ctx, ref, opts.RefCfg.Compression, sg)
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "merge nydus layer")
+ }
+ remote.Descriptors = append(remote.Descriptors, *desc)
+
+ remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, opts.OCITypes)
+ return remote, history, nil
+}
diff --git a/exporter/containerimage/writer.go b/exporter/containerimage/writer.go
index e5ec1519803d..068d86958f8f 100644
--- a/exporter/containerimage/writer.go
+++ b/exporter/containerimage/writer.go
@@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "strconv"
"strings"
"time"
@@ -12,18 +13,24 @@ import (
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
"github.com/moby/buildkit/cache"
cacheconfig "github.com/moby/buildkit/cache/config"
"github.com/moby/buildkit/exporter"
+ "github.com/moby/buildkit/exporter/attestation"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
+ "github.com/moby/buildkit/exporter/util/epoch"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/solver"
+ "github.com/moby/buildkit/solver/result"
+ attestationTypes "github.com/moby/buildkit/util/attestation"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/buildinfo"
binfotypes "github.com/moby/buildkit/util/buildinfo/types"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/progress"
+ "github.com/moby/buildkit/util/purl"
"github.com/moby/buildkit/util/system"
"github.com/moby/buildkit/util/tracing"
digest "github.com/opencontainers/go-digest"
@@ -50,57 +57,123 @@ type ImageWriter struct {
opt WriterOpt
}
-func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool, refCfg cacheconfig.RefConfig, buildInfo bool, buildInfoAttrs bool, sessionID string) (*ocispecs.Descriptor, error) {
- platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey]
-
- if len(inp.Refs) > 0 && !ok {
+func (ic *ImageWriter) Commit(ctx context.Context, inp *exporter.Source, sessionID string, opts *ImageCommitOpts) (*ocispecs.Descriptor, error) {
+ if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; len(inp.Refs) > 0 && !ok {
return nil, errors.Errorf("unable to export multiple refs, missing platforms mapping")
}
- if len(inp.Refs) == 0 {
- remotes, err := ic.exportLayers(ctx, refCfg, session.NewGroup(sessionID), inp.Ref)
+ isMap := len(inp.Refs) > 0
+
+ ps, err := exptypes.ParsePlatforms(inp.Metadata)
+ if err != nil {
+ return nil, err
+ }
+
+ if !isMap {
+ // enable index if we need to include attestations
+ for _, p := range ps.Platforms {
+ if atts, ok := inp.Attestations[p.ID]; ok {
+ if !opts.ForceInlineAttestations {
+					// if we don't need to force inline attestations (as is the
+					// case for the oci exporter), filter them out
+ atts = attestation.Filter(atts, nil, map[string][]byte{
+ result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(true)),
+ })
+ }
+ if len(atts) > 0 {
+ isMap = true
+ break
+ }
+ }
+ }
+ }
+ if opts.Epoch == nil {
+ if tm, ok, err := epoch.ParseSource(inp); err != nil {
+ return nil, err
+ } else if ok {
+ opts.Epoch = tm
+ }
+ }
+
+ for pk, a := range opts.Annotations {
+ if pk != "" {
+ if _, ok := inp.FindRef(pk); !ok {
+ return nil, errors.Errorf("invalid annotation: no platform %s found in source", pk)
+ }
+ }
+ if len(a.Index)+len(a.IndexDescriptor)+len(a.ManifestDescriptor) > 0 {
+ opts.EnableOCITypes("annotations")
+ }
+ }
+
+ if !isMap {
+ if len(ps.Platforms) > 1 {
+ return nil, errors.Errorf("cannot export multiple platforms without multi-platform enabled")
+ }
+
+ var ref cache.ImmutableRef
+ var p exptypes.Platform
+ if len(ps.Platforms) > 0 {
+ p = ps.Platforms[0]
+ if r, ok := inp.FindRef(p.ID); ok {
+ ref = r
+ }
+ } else {
+ ref = inp.Ref
+ }
+
+ remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), ref)
if err != nil {
return nil, err
}
var dtbi []byte
- if buildInfo {
- if dtbi, err = buildinfo.Format(inp.Metadata[exptypes.ExporterBuildInfo], buildinfo.FormatOpts{
- RemoveAttrs: !buildInfoAttrs,
+ if opts.BuildInfo {
+ if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{
+ RemoveAttrs: !opts.BuildInfoAttrs,
}); err != nil {
return nil, err
}
}
- mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], &remotes[0], oci, inp.Metadata[exptypes.ExporterInlineCache], dtbi)
+ annotations := opts.Annotations.Platform(nil)
+ if len(annotations.Index) > 0 || len(annotations.IndexDescriptor) > 0 {
+ return nil, errors.Errorf("index annotations not supported for single platform export")
+ }
+
+ config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p)
+ inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p)
+ mfstDesc, configDesc, err := ic.commitDistributionManifest(ctx, opts, ref, config, &remotes[0], annotations, inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID))
if err != nil {
return nil, err
}
if mfstDesc.Annotations == nil {
mfstDesc.Annotations = make(map[string]string)
}
+ if len(ps.Platforms) == 1 {
+ mfstDesc.Platform = &ps.Platforms[0].Platform
+ }
mfstDesc.Annotations[exptypes.ExporterConfigDigestKey] = configDesc.Digest.String()
return mfstDesc, nil
}
- var p exptypes.Platforms
- if err := json.Unmarshal(platformsBytes, &p); err != nil {
- return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter")
- }
-
- if len(p.Platforms) != len(inp.Refs) {
- return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs))
+ if len(inp.Attestations) > 0 {
+ opts.EnableOCITypes("attestations")
}
refs := make([]cache.ImmutableRef, 0, len(inp.Refs))
remotesMap := make(map[string]int, len(inp.Refs))
- for id, r := range inp.Refs {
- remotesMap[id] = len(refs)
+ for _, p := range ps.Platforms {
+ r, ok := inp.FindRef(p.ID)
+ if !ok {
+ return nil, errors.Errorf("failed to find ref for ID %s", p.ID)
+ }
+ remotesMap[p.ID] = len(refs)
refs = append(refs, r)
}
- remotes, err := ic.exportLayers(ctx, refCfg, session.NewGroup(sessionID), refs...)
+ remotes, err := ic.exportLayers(ctx, opts.RefCfg, session.NewGroup(sessionID), refs...)
if err != nil {
return nil, err
}
@@ -114,36 +187,46 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
}{
MediaType: ocispecs.MediaTypeImageIndex,
Index: ocispecs.Index{
+ Annotations: opts.Annotations.Platform(nil).Index,
Versioned: specs.Versioned{
SchemaVersion: 2,
},
},
}
- if !oci {
+ if !opts.OCITypes {
idx.MediaType = images.MediaTypeDockerSchema2ManifestList
}
labels := map[string]string{}
- for i, p := range p.Platforms {
- r, ok := inp.Refs[p.ID]
+ var attestationManifests []ocispecs.Descriptor
+
+ for i, p := range ps.Platforms {
+ r, ok := inp.FindRef(p.ID)
if !ok {
return nil, errors.Errorf("failed to find ref for ID %s", p.ID)
}
- config := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.ID)]
- inlineCache := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, p.ID)]
+ config := exptypes.ParseKey(inp.Metadata, exptypes.ExporterImageConfigKey, p)
+ inlineCache := exptypes.ParseKey(inp.Metadata, exptypes.ExporterInlineCache, p)
var dtbi []byte
- if buildInfo {
- if dtbi, err = buildinfo.Format(inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p.ID)], buildinfo.FormatOpts{
- RemoveAttrs: !buildInfoAttrs,
+ if opts.BuildInfo {
+ if dtbi, err = buildinfo.Format(exptypes.ParseKey(inp.Metadata, exptypes.ExporterBuildInfo, p), buildinfo.FormatOpts{
+ RemoveAttrs: !opts.BuildInfoAttrs,
}); err != nil {
return nil, err
}
}
- desc, _, err := ic.commitDistributionManifest(ctx, r, config, &remotes[remotesMap[p.ID]], oci, inlineCache, dtbi)
+ remote := &remotes[remotesMap[p.ID]]
+ if remote == nil {
+ remote = &solver.Remote{
+ Provider: ic.opt.ContentStore,
+ }
+ }
+
+ desc, _, err := ic.commitDistributionManifest(ctx, opts, r, config, remote, opts.Annotations.Platform(&p.Platform), inlineCache, dtbi, opts.Epoch, session.NewGroup(sessionID))
if err != nil {
return nil, err
}
@@ -152,20 +235,75 @@ func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool
idx.Manifests = append(idx.Manifests, *desc)
labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = desc.Digest.String()
+
+ if attestations, ok := inp.Attestations[p.ID]; ok {
+ attestations, err := attestation.Unbundle(ctx, session.NewGroup(sessionID), attestations)
+ if err != nil {
+ return nil, err
+ }
+
+ eg, ctx2 := errgroup.WithContext(ctx)
+ for i, att := range attestations {
+ i, att := i, att
+ eg.Go(func() error {
+ att, err := supplementSBOM(ctx2, session.NewGroup(sessionID), r, remote, att)
+ if err != nil {
+ return err
+ }
+ attestations[i] = att
+ return nil
+ })
+ }
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+
+ var defaultSubjects []intoto.Subject
+ for _, name := range strings.Split(opts.ImageName, ",") {
+ if name == "" {
+ continue
+ }
+ pl, err := purl.RefToPURL(name, &p.Platform)
+ if err != nil {
+ return nil, err
+ }
+ defaultSubjects = append(defaultSubjects, intoto.Subject{
+ Name: pl,
+ Digest: result.ToDigestMap(desc.Digest),
+ })
+ }
+ stmts, err := attestation.MakeInTotoStatements(ctx, session.NewGroup(sessionID), attestations, defaultSubjects)
+ if err != nil {
+ return nil, err
+ }
+
+ desc, err := ic.commitAttestationsManifest(ctx, opts, p, desc.Digest.String(), stmts)
+ if err != nil {
+ return nil, err
+ }
+ desc.Platform = &intotoPlatform
+ attestationManifests = append(attestationManifests, *desc)
+ }
+ }
+
+ for i, mfst := range attestationManifests {
+ idx.Manifests = append(idx.Manifests, mfst)
+ labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", len(ps.Platforms)+i)] = mfst.Digest.String()
}
- idxBytes, err := json.MarshalIndent(idx, "", " ")
+ idxBytes, err := json.MarshalIndent(idx, "", " ")
if err != nil {
return nil, errors.Wrap(err, "failed to marshal index")
}
idxDigest := digest.FromBytes(idxBytes)
idxDesc := ocispecs.Descriptor{
- Digest: idxDigest,
- Size: int64(len(idxBytes)),
- MediaType: idx.MediaType,
+ Digest: idxDigest,
+ Size: int64(len(idxBytes)),
+ MediaType: idx.MediaType,
+ Annotations: opts.Annotations.Platform(nil).IndexDescriptor,
}
- idxDone := oneOffProgress(ctx, "exporting manifest list "+idxDigest.String())
+ idxDone := progress.OneOff(ctx, "exporting manifest list "+idxDigest.String())
if err := content.WriteBlob(ctx, ic.opt.ContentStore, idxDigest.String(), bytes.NewReader(idxBytes), idxDesc, content.WithLabels(labels)); err != nil {
return nil, idxDone(errors.Wrapf(err, "error writing manifest list blob %s", idxDigest))
@@ -186,7 +324,7 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC
span, ctx := tracing.StartSpan(ctx, "export layers", trace.WithAttributes(attr...))
eg, ctx := errgroup.WithContext(ctx)
- layersDone := oneOffProgress(ctx, "exporting layers")
+ layersDone := progress.OneOff(ctx, "exporting layers")
out := make([]solver.Remote, len(refs))
@@ -212,29 +350,26 @@ func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefC
return out, err
}
-func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, remote *solver.Remote, oci bool, inlineCache []byte, buildInfo []byte) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) {
+func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, opts *ImageCommitOpts, ref cache.ImmutableRef, config []byte, remote *solver.Remote, annotations *Annotations, inlineCache []byte, buildInfo []byte, epoch *time.Time, sg session.Group) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) {
if len(config) == 0 {
var err error
- config, err = emptyImageConfig()
+ config, err = defaultImageConfig()
if err != nil {
return nil, nil, err
}
}
- if remote == nil {
- remote = &solver.Remote{
- Provider: ic.opt.ContentStore,
- }
- }
-
history, err := parseHistoryFromConfig(config)
if err != nil {
return nil, nil, err
}
- remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, oci)
+ remote, history, err = patchImageLayers(ctx, remote, history, ref, opts, sg)
+ if err != nil {
+ return nil, nil, err
+ }
- config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo)
+ config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo, epoch)
if err != nil {
return nil, nil, err
}
@@ -246,7 +381,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache
)
// Use docker media types for older Docker versions and registries
- if !oci {
+ if !opts.OCITypes {
manifestType = images.MediaTypeDockerSchema2Manifest
configType = images.MediaTypeDockerSchema2Config
}
@@ -260,6 +395,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache
}{
MediaType: manifestType,
Manifest: ocispecs.Manifest{
+ Annotations: annotations.Manifest,
Versioned: specs.Versioned{
SchemaVersion: 2,
},
@@ -275,25 +411,12 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache
"containerd.io/gc.ref.content.0": configDigest.String(),
}
- for i, desc := range remote.Descriptors {
- // oci supports annotations but don't export internal annotations
- if oci {
- delete(desc.Annotations, "containerd.io/uncompressed")
- delete(desc.Annotations, "buildkit/createdat")
- for k := range desc.Annotations {
- if strings.HasPrefix(k, "containerd.io/distribution.source.") {
- delete(desc.Annotations, k)
- }
- }
- } else {
- desc.Annotations = nil
- }
-
+ for _, desc := range remote.Descriptors {
+ desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes)
mfst.Layers = append(mfst.Layers, desc)
- labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String()
}
- mfstJSON, err := json.MarshalIndent(mfst, "", " ")
+ mfstJSON, err := json.MarshalIndent(mfst, "", " ")
if err != nil {
return nil, nil, errors.Wrap(err, "failed to marshal manifest")
}
@@ -303,7 +426,7 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache
Digest: mfstDigest,
Size: int64(len(mfstJSON)),
}
- mfstDone := oneOffProgress(ctx, "exporting manifest "+mfstDigest.String())
+ mfstDone := progress.OneOff(ctx, "exporting manifest "+mfstDigest.String())
if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil {
return nil, nil, mfstDone(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest))
@@ -315,18 +438,125 @@ func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache
Size: int64(len(config)),
MediaType: configType,
}
- configDone := oneOffProgress(ctx, "exporting config "+configDigest.String())
+ configDone := progress.OneOff(ctx, "exporting config "+configDigest.String())
if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil {
return nil, nil, configDone(errors.Wrap(err, "error writing config blob"))
}
configDone(nil)
+ return &ocispecs.Descriptor{
+ Annotations: annotations.ManifestDescriptor,
+ Digest: mfstDigest,
+ Size: int64(len(mfstJSON)),
+ MediaType: manifestType,
+ }, &configDesc, nil
+}
+
+func (ic *ImageWriter) commitAttestationsManifest(ctx context.Context, opts *ImageCommitOpts, p exptypes.Platform, target string, statements []intoto.Statement) (*ocispecs.Descriptor, error) {
+ var (
+ manifestType = ocispecs.MediaTypeImageManifest
+ configType = ocispecs.MediaTypeImageConfig
+ )
+ if !opts.OCITypes {
+ manifestType = images.MediaTypeDockerSchema2Manifest
+ configType = images.MediaTypeDockerSchema2Config
+ }
+
+ layers := make([]ocispecs.Descriptor, len(statements))
+ for i, statement := range statements {
+ i, statement := i, statement
+
+ data, err := json.Marshal(statement)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal attestation")
+ }
+ digest := digest.FromBytes(data)
+ desc := ocispecs.Descriptor{
+ MediaType: attestationTypes.MediaTypeDockerSchema2AttestationType,
+ Digest: digest,
+ Size: int64(len(data)),
+ Annotations: map[string]string{
+ "containerd.io/uncompressed": digest.String(),
+ "in-toto.io/predicate-type": statement.PredicateType,
+ },
+ }
+
+ if err := content.WriteBlob(ctx, ic.opt.ContentStore, digest.String(), bytes.NewReader(data), desc); err != nil {
+ return nil, errors.Wrapf(err, "error writing data blob %s", digest)
+ }
+ layers[i] = desc
+ }
+
+ config, err := attestationsConfig(layers)
+ if err != nil {
+ return nil, err
+ }
+ configDigest := digest.FromBytes(config)
+ configDesc := ocispecs.Descriptor{
+ Digest: configDigest,
+ Size: int64(len(config)),
+ MediaType: configType,
+ }
+
+ mfst := struct {
+ // MediaType is reserved in the OCI spec but
+ // excluded from go types.
+ MediaType string `json:"mediaType,omitempty"`
+
+ ocispecs.Manifest
+ }{
+ MediaType: manifestType,
+ Manifest: ocispecs.Manifest{
+ Versioned: specs.Versioned{
+ SchemaVersion: 2,
+ },
+ Config: ocispecs.Descriptor{
+ Digest: configDigest,
+ Size: int64(len(config)),
+ MediaType: configType,
+ },
+ },
+ }
+
+ labels := map[string]string{
+ "containerd.io/gc.ref.content.0": configDigest.String(),
+ }
+ for i, desc := range layers {
+ desc.Annotations = RemoveInternalLayerAnnotations(desc.Annotations, opts.OCITypes)
+ mfst.Layers = append(mfst.Layers, desc)
+ labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String()
+ }
+
+ mfstJSON, err := json.MarshalIndent(mfst, "", " ")
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal manifest")
+ }
+
+ mfstDigest := digest.FromBytes(mfstJSON)
+ mfstDesc := ocispecs.Descriptor{
+ Digest: mfstDigest,
+ Size: int64(len(mfstJSON)),
+ }
+
+ done := progress.OneOff(ctx, "exporting attestation manifest "+mfstDigest.String())
+ if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil {
+ return nil, done(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest))
+ }
+ if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil {
+ return nil, done(errors.Wrap(err, "error writing config blob"))
+ }
+ done(nil)
+
return &ocispecs.Descriptor{
Digest: mfstDigest,
Size: int64(len(mfstJSON)),
MediaType: manifestType,
- }, &configDesc, nil
+ Annotations: map[string]string{
+ attestationTypes.DockerAnnotationReferenceType: attestationTypes.DockerAnnotationReferenceTypeDefault,
+ attestationTypes.DockerAnnotationReferenceDigest: target,
+ },
+ }, nil
}
func (ic *ImageWriter) ContentStore() content.Store {
@@ -341,22 +571,13 @@ func (ic *ImageWriter) Applier() diff.Applier {
return ic.opt.Applier
}
-func emptyImageConfig() ([]byte, error) {
+func defaultImageConfig() ([]byte, error) {
pl := platforms.Normalize(platforms.DefaultSpec())
- type image struct {
- ocispecs.Image
-
- // Variant defines platform variant. To be added to OCI.
- Variant string `json:"variant,omitempty"`
- }
-
- img := image{
- Image: ocispecs.Image{
- Architecture: pl.Architecture,
- OS: pl.OS,
- },
- Variant: pl.Variant,
+ img := ocispecs.Image{
+ Architecture: pl.Architecture,
+ OS: pl.OS,
+ Variant: pl.Variant,
}
img.RootFS.Type = "layers"
img.Config.WorkingDir = "/"
@@ -365,6 +586,22 @@ func emptyImageConfig() ([]byte, error) {
return dt, errors.Wrap(err, "failed to create empty image config")
}
+func attestationsConfig(layers []ocispecs.Descriptor) ([]byte, error) {
+ img := ocispecs.Image{
+ Architecture: intotoPlatform.Architecture,
+ OS: intotoPlatform.OS,
+ OSVersion: intotoPlatform.OSVersion,
+ OSFeatures: intotoPlatform.OSFeatures,
+ Variant: intotoPlatform.Variant,
+ }
+ img.RootFS.Type = "layers"
+ for _, layer := range layers {
+ img.RootFS.DiffIDs = append(img.RootFS.DiffIDs, digest.Digest(layer.Annotations["containerd.io/uncompressed"]))
+ }
+ dt, err := json.Marshal(img)
+ return dt, errors.Wrap(err, "failed to create attestations image config")
+}
+
func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) {
var config struct {
History []ocispecs.History
@@ -375,7 +612,7 @@ func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) {
return config.History, nil
}
-func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte) ([]byte, error) {
+func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte, epoch *time.Time) ([]byte, error) {
m := map[string]json.RawMessage{}
if err := json.Unmarshal(dt, &m); err != nil {
return nil, errors.Wrap(err, "failed to parse image config for patch")
@@ -392,12 +629,35 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs
}
m["rootfs"] = dt
+ if epoch != nil {
+ for i, h := range history {
+ if h.Created == nil || h.Created.After(*epoch) {
+ history[i].Created = epoch
+ }
+ }
+ }
+
dt, err = json.Marshal(history)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal history")
}
m["history"] = dt
+ // if epoch is set then clamp creation time
+ if v, ok := m["created"]; ok && epoch != nil {
+ var tm time.Time
+ if err := json.Unmarshal(v, &tm); err != nil {
+ return nil, errors.Wrapf(err, "failed to unmarshal creation time %q", m["created"])
+ }
+ if tm.After(*epoch) {
+ dt, err = json.Marshal(&epoch)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to marshal creation time")
+ }
+ m["created"] = dt
+ }
+ }
+
if _, ok := m["created"]; !ok {
var tm *time.Time
for _, h := range history {
@@ -426,7 +686,7 @@ func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs
return nil, err
}
m[binfotypes.ImageConfigField] = dt
- } else if _, ok := m[binfotypes.ImageConfigField]; ok {
+ } else {
delete(m, binfotypes.ImageConfigField)
}
@@ -521,6 +781,26 @@ func normalizeLayersAndHistory(ctx context.Context, remote *solver.Remote, histo
return remote, history
}
+func RemoveInternalLayerAnnotations(in map[string]string, oci bool) map[string]string {
+ if len(in) == 0 || !oci {
+ return nil
+ }
+ m := make(map[string]string, len(in))
+ for k, v := range in {
+ // OCI supports annotations, but internal annotations should not be exported
+ switch k {
+ case "containerd.io/uncompressed", "buildkit/createdat":
+ continue
+ default:
+ if strings.HasPrefix(k, "containerd.io/distribution.source.") {
+ continue
+ }
+ m[k] = v
+ }
+ }
+ return m
+}
+
type refMetadata struct {
description string
createdAt *time.Time
@@ -553,20 +833,3 @@ func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata {
}
return metas
}
-
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
- pw, _, _ := progress.NewFromContext(ctx)
- now := time.Now()
- st := progress.Status{
- Started: &now,
- }
- pw.Write(id, st)
- return func(err error) error {
- // TODO: set error on status
- now := time.Now()
- st.Completed = &now
- pw.Write(id, st)
- pw.Close()
- return err
- }
-}
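
The scattered per-package oneOffProgress helpers are replaced by progress.OneOff throughout this diff. Below is a minimal sketch of the calling pattern, assuming only the signature implied by the call sites above (a closure that records completion and passes its error through); writeExampleBlob and doWrite are illustrative names, not part of the change.

package example

import (
	"context"

	"github.com/moby/buildkit/util/progress"
	"github.com/pkg/errors"
)

// writeExampleBlob shows the OneOff pattern: open a one-off progress vertex,
// then finish it through the returned closure on both error and success paths.
func writeExampleBlob(ctx context.Context) error {
	done := progress.OneOff(ctx, "exporting example blob")
	if err := doWrite(ctx); err != nil {
		// done() marks the vertex complete and returns the error unchanged.
		return done(errors.Wrap(err, "error writing example blob"))
	}
	return done(nil)
}

// doWrite stands in for the real content-store write.
func doWrite(ctx context.Context) error { return nil }
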
diff --git a/exporter/exporter.go b/exporter/exporter.go
index 610481b710f6..0e7d8d14f280 100644
--- a/exporter/exporter.go
+++ b/exporter/exporter.go
@@ -4,25 +4,49 @@ import (
"context"
"github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/solver/result"
"github.com/moby/buildkit/util/compression"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
+type Source = result.Result[cache.ImmutableRef]
+
+type Attestation = result.Attestation[cache.ImmutableRef]
+
type Exporter interface {
Resolve(context.Context, map[string]string) (ExporterInstance, error)
}
type ExporterInstance interface {
Name() string
- Config() Config
- Export(ctx context.Context, src Source, sessionID string) (map[string]string, error)
+ Config() *Config
+ Export(ctx context.Context, src *Source, sessionID string) (map[string]string, DescriptorReference, error)
}
-type Source struct {
- Ref cache.ImmutableRef
- Refs map[string]cache.ImmutableRef
- Metadata map[string][]byte
+type DescriptorReference interface {
+ Release() error
+ Descriptor() ocispecs.Descriptor
}
type Config struct {
- Compression compression.Config
+ // Make the field private in case it is initialized with nil compression.Type
+ compression compression.Config
+}
+
+func NewConfig() *Config {
+ return &Config{
+ compression: compression.Config{
+ Type: compression.Default,
+ },
+ }
+}
+
+func NewConfigWithCompression(comp compression.Config) *Config {
+ return &Config{
+ compression: comp,
+ }
+}
+
+func (c *Config) Compression() compression.Config {
+ return c.compression
}
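
exporter.Config now keeps its compression private so an unset compression type cannot leak into callers; the constructors above are the only way in. A small sketch of how an exporter instance's Config() method might use them (configFor is an illustrative helper, not part of this change):

package example

import (
	"github.com/moby/buildkit/exporter"
	"github.com/moby/buildkit/util/compression"
)

// configFor passes a user-selected compression through when present and
// otherwise falls back to NewConfig, which defaults to compression.Default.
func configFor(comp *compression.Config) *exporter.Config {
	if comp == nil {
		return exporter.NewConfig()
	}
	return exporter.NewConfigWithCompression(*comp)
}
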
diff --git a/exporter/local/export.go b/exporter/local/export.go
index 5daa4aa4268c..7d08b172e019 100644
--- a/exporter/local/export.go
+++ b/exporter/local/export.go
@@ -2,24 +2,28 @@ package local
import (
"context"
- "io/ioutil"
"os"
"strings"
"time"
- "github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/exporter"
+ "github.com/moby/buildkit/exporter/containerimage/exptypes"
+ "github.com/moby/buildkit/exporter/util/epoch"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
- "github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/progress"
+ "github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
fstypes "github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
"golang.org/x/time/rate"
)
+const (
+ keyAttestationPrefix = "attestation-prefix"
+)
+
type Opt struct {
SessionManager *session.Manager
}
@@ -35,93 +39,103 @@ func New(opt Opt) (exporter.Exporter, error) {
}
func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
- return &localExporterInstance{localExporter: e}, nil
+ tm, _, err := epoch.ParseExporterAttrs(opt)
+ if err != nil {
+ return nil, err
+ }
+
+ i := &localExporterInstance{
+ localExporter: e,
+ opts: CreateFSOpts{
+ Epoch: tm,
+ },
+ }
+
+ for k, v := range opt {
+ switch k {
+ case keyAttestationPrefix:
+ i.opts.AttestationPrefix = v
+ }
+ }
+
+ return i, nil
}
type localExporterInstance struct {
*localExporter
+ opts CreateFSOpts
}
func (e *localExporterInstance) Name() string {
- return "exporting to client"
+ return "exporting to client directory"
}
-func (e *localExporter) Config() exporter.Config {
- return exporter.Config{}
+func (e *localExporter) Config() *exporter.Config {
+ return exporter.NewConfig()
}
-func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) {
+func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
+ if e.opts.Epoch == nil {
+ if tm, ok, err := epoch.ParseSource(inp); err != nil {
+ return nil, nil, err
+ } else if ok {
+ e.opts.Epoch = tm
+ }
+ }
+
caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false)
if err != nil {
- return nil, err
+ return nil, nil, err
}
isMap := len(inp.Refs) > 0
- export := func(ctx context.Context, k string, ref cache.ImmutableRef) func() error {
- return func() error {
- var src string
- var err error
- var idmap *idtools.IdentityMapping
- if ref == nil {
- src, err = ioutil.TempDir("", "buildkit")
- if err != nil {
- return err
- }
- defer os.RemoveAll(src)
- } else {
- mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID))
- if err != nil {
- return err
- }
-
- lm := snapshot.LocalMounter(mount)
+ if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; isMap && !ok {
+ return nil, nil, errors.Errorf("unable to export multiple refs, missing platforms mapping")
+ }
+ p, err := exptypes.ParsePlatforms(inp.Metadata)
+ if err != nil {
+ return nil, nil, err
+ }
- src, err = lm.Mount()
- if err != nil {
- return err
- }
+ if !isMap && len(p.Platforms) > 1 {
+ return nil, nil, errors.Errorf("unable to export multiple platforms without map")
+ }
- idmap = mount.IdentityMapping()
+ now := time.Now().Truncate(time.Second)
- defer lm.Unmount()
+ export := func(ctx context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) func() error {
+ return func() error {
+ outputFS, cleanup, err := CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts)
+ if err != nil {
+ return err
}
-
- walkOpt := &fsutil.WalkOpt{}
-
- if idmap != nil {
- walkOpt.Map = func(p string, st *fstypes.Stat) bool {
- uid, gid, err := idmap.ToContainer(idtools.Identity{
- UID: int(st.Uid),
- GID: int(st.Gid),
- })
- if err != nil {
- return false
- }
- st.Uid = uint32(uid)
- st.Gid = uint32(gid)
- return true
- }
+ if cleanup != nil {
+ defer cleanup()
}
- fs := fsutil.NewFS(src, walkOpt)
lbl := "copying files"
if isMap {
lbl += " " + k
- fs, err = fsutil.SubDirFS([]fsutil.Dir{{FS: fs, Stat: fstypes.Stat{
+ st := fstypes.Stat{
Mode: uint32(os.ModeDir | 0755),
Path: strings.Replace(k, "/", "_", -1),
- }}})
+ }
+ if e.opts.Epoch != nil {
+ st.ModTime = e.opts.Epoch.UnixNano()
+ }
+
+ outputFS, err = fsutil.SubDirFS([]fsutil.Dir{{FS: outputFS, Stat: st}})
if err != nil {
return err
}
}
- progress := newProgressHandler(ctx, lbl)
- if err := filesync.CopyToCaller(ctx, fs, caller, progress); err != nil {
+ progress := NewProgressHandler(ctx, lbl)
+ if err := filesync.CopyToCaller(ctx, outputFS, caller, progress); err != nil {
return err
}
return nil
@@ -130,21 +144,25 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source,
eg, ctx := errgroup.WithContext(ctx)
- if isMap {
- for k, ref := range inp.Refs {
- eg.Go(export(ctx, k, ref))
+ if len(p.Platforms) > 0 {
+ for _, p := range p.Platforms {
+ r, ok := inp.FindRef(p.ID)
+ if !ok {
+ return nil, nil, errors.Errorf("failed to find ref for ID %s", p.ID)
+ }
+ eg.Go(export(ctx, p.ID, r, inp.Attestations[p.ID]))
}
} else {
- eg.Go(export(ctx, "", inp.Ref))
+ eg.Go(export(ctx, "", inp.Ref, nil))
}
if err := eg.Wait(); err != nil {
- return nil, err
+ return nil, nil, err
}
- return nil, nil
+ return nil, nil, nil
}
-func newProgressHandler(ctx context.Context, id string) func(int, bool) {
+func NewProgressHandler(ctx context.Context, id string) func(int, bool) {
limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
pw, _, _ := progress.NewFromContext(ctx)
now := time.Now()
diff --git a/exporter/local/fs.go b/exporter/local/fs.go
new file mode 100644
index 000000000000..c5a524aae32f
--- /dev/null
+++ b/exporter/local/fs.go
@@ -0,0 +1,161 @@
+package local
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "io/fs"
+ "os"
+ "path"
+ "strconv"
+ "time"
+
+ "github.com/docker/docker/pkg/idtools"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
+ "github.com/moby/buildkit/cache"
+ "github.com/moby/buildkit/exporter"
+ "github.com/moby/buildkit/exporter/attestation"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/snapshot"
+ "github.com/moby/buildkit/solver/result"
+ "github.com/moby/buildkit/util/staticfs"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+ "github.com/tonistiigi/fsutil"
+ fstypes "github.com/tonistiigi/fsutil/types"
+)
+
+type CreateFSOpts struct {
+ Epoch *time.Time
+ AttestationPrefix string
+}
+
+func CreateFS(ctx context.Context, sessionID string, k string, ref cache.ImmutableRef, attestations []exporter.Attestation, defaultTime time.Time, opt CreateFSOpts) (fsutil.FS, func() error, error) {
+ var cleanup func() error
+ var src string
+ var err error
+ var idmap *idtools.IdentityMapping
+ if ref == nil {
+ src, err = os.MkdirTemp("", "buildkit")
+ if err != nil {
+ return nil, nil, err
+ }
+ cleanup = func() error { return os.RemoveAll(src) }
+ } else {
+ mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ lm := snapshot.LocalMounter(mount)
+
+ src, err = lm.Mount()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ idmap = mount.IdentityMapping()
+
+ cleanup = lm.Unmount
+ }
+
+ walkOpt := &fsutil.WalkOpt{}
+ var idMapFunc func(p string, st *fstypes.Stat) fsutil.MapResult
+
+ if idmap != nil {
+ idMapFunc = func(p string, st *fstypes.Stat) fsutil.MapResult {
+ uid, gid, err := idmap.ToContainer(idtools.Identity{
+ UID: int(st.Uid),
+ GID: int(st.Gid),
+ })
+ if err != nil {
+ return fsutil.MapResultExclude
+ }
+ st.Uid = uint32(uid)
+ st.Gid = uint32(gid)
+ return fsutil.MapResultKeep
+ }
+ }
+
+ walkOpt.Map = func(p string, st *fstypes.Stat) fsutil.MapResult {
+ res := fsutil.MapResultKeep
+ if idMapFunc != nil {
+ res = idMapFunc(p, st)
+ }
+ if opt.Epoch != nil {
+ st.ModTime = opt.Epoch.UnixNano()
+ }
+ return res
+ }
+
+ outputFS := fsutil.NewFS(src, walkOpt)
+ attestations = attestation.Filter(attestations, nil, map[string][]byte{
+ result.AttestationInlineOnlyKey: []byte(strconv.FormatBool(true)),
+ })
+ attestations, err = attestation.Unbundle(ctx, session.NewGroup(sessionID), attestations)
+ if err != nil {
+ return nil, nil, err
+ }
+ if len(attestations) > 0 {
+ subjects := []intoto.Subject{}
+ err = outputFS.Walk(ctx, func(path string, info fs.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !info.Mode().IsRegular() {
+ return nil
+ }
+ f, err := outputFS.Open(path)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ d := digest.Canonical.Digester()
+ if _, err := io.Copy(d.Hash(), f); err != nil {
+ return err
+ }
+ subjects = append(subjects, intoto.Subject{
+ Name: path,
+ Digest: result.ToDigestMap(d.Digest()),
+ })
+ return nil
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ stmts, err := attestation.MakeInTotoStatements(ctx, session.NewGroup(sessionID), attestations, subjects)
+ if err != nil {
+ return nil, nil, err
+ }
+ stmtFS := staticfs.NewFS()
+
+ names := map[string]struct{}{}
+ for i, stmt := range stmts {
+ dt, err := json.MarshalIndent(stmt, "", " ")
+ if err != nil {
+ return nil, nil, errors.Wrap(err, "failed to marshal attestation")
+ }
+
+ name := opt.AttestationPrefix + path.Base(attestations[i].Path)
+ if _, ok := names[name]; ok {
+ return nil, nil, errors.Errorf("duplicate attestation path name %s", name)
+ }
+ names[name] = struct{}{}
+
+ st := fstypes.Stat{
+ Mode: 0600,
+ Path: name,
+ ModTime: defaultTime.UnixNano(),
+ }
+ if opt.Epoch != nil {
+ st.ModTime = opt.Epoch.UnixNano()
+ }
+ stmtFS.Add(name, st, dt)
+ }
+
+ outputFS = staticfs.NewMergeFS(outputFS, stmtFS)
+ }
+
+ return outputFS, cleanup, nil
+}
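
CreateFS is the piece now shared by the local-directory and tar exporters: it mounts (or fakes) the ref, applies the identity mapping and epoch clamping, and merges unbundled attestation statements into the output. A hedged sketch of the calling pattern, using only the signature defined above; the attestation prefix is illustrative.

package example

import (
	"context"
	"time"

	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/exporter"
	"github.com/moby/buildkit/exporter/local"
	"github.com/tonistiigi/fsutil"
)

// buildOutputFS assembles the filesystem for one exported ref. The returned
// cleanup function (unmount or temp-dir removal) must run after the copy.
func buildOutputFS(ctx context.Context, sessionID, key string, ref cache.ImmutableRef,
	attestations []exporter.Attestation, epoch *time.Time) (fsutil.FS, func() error, error) {
	now := time.Now().Truncate(time.Second)
	opts := local.CreateFSOpts{
		Epoch:             epoch,   // when set, clamps file ModTime for reproducible output
		AttestationPrefix: "sbom.", // illustrative prefix for emitted attestation files
	}
	return local.CreateFS(ctx, sessionID, key, ref, attestations, now, opts)
}
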
diff --git a/exporter/oci/export.go b/exporter/oci/export.go
index 153211c9b709..60982f4daf3c 100644
--- a/exporter/oci/export.go
+++ b/exporter/oci/export.go
@@ -4,19 +4,23 @@ import (
"context"
"encoding/base64"
"encoding/json"
+ "fmt"
"strconv"
"strings"
"time"
archiveexporter "github.com/containerd/containerd/images/archive"
"github.com/containerd/containerd/leases"
+ "github.com/containerd/containerd/remotes"
"github.com/docker/distribution/reference"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
"github.com/moby/buildkit/cache"
cacheconfig "github.com/moby/buildkit/cache/config"
"github.com/moby/buildkit/exporter"
"github.com/moby/buildkit/exporter/containerimage"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/session"
+ sessioncontent "github.com/moby/buildkit/session/content"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
@@ -25,26 +29,18 @@ import (
"github.com/moby/buildkit/util/progress"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
- "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
)
type ExporterVariant string
const (
- keyImageName = "name"
- keyLayerCompression = "compression"
- VariantOCI = "oci"
- VariantDocker = "docker"
- ociTypes = "oci-mediatypes"
- keyForceCompression = "force-compression"
- keyCompressionLevel = "compression-level"
- keyBuildInfo = "buildinfo"
- keyBuildInfoAttrs = "buildinfo-attrs"
- // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was
- // already found to use a non-distributable media type.
- // When this option is not set, the exporter will change the media type of the layer to a distributable one.
- preferNondistLayersKey = "prefer-nondist-layers"
+ VariantOCI = "oci"
+ VariantDocker = "docker"
+)
+
+const (
+ keyTar = "tar"
)
type Opt struct {
@@ -64,85 +60,35 @@ func New(opt Opt) (exporter.Exporter, error) {
}
func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
- var ot *bool
i := &imageExporterInstance{
- imageExporter: e,
- layerCompression: compression.Default,
- buildInfo: true,
+ imageExporter: e,
+ tar: true,
+ opts: containerimage.ImageCommitOpts{
+ RefCfg: cacheconfig.RefConfig{
+ Compression: compression.New(compression.Default),
+ },
+ BuildInfo: true,
+ OCITypes: e.opt.Variant == VariantOCI,
+ },
+ }
+
+ opt, err := i.opts.Load(opt)
+ if err != nil {
+ return nil, err
}
- var esgz bool
+
for k, v := range opt {
switch k {
- case keyImageName:
- i.name = v
- case keyLayerCompression:
- switch v {
- case "gzip":
- i.layerCompression = compression.Gzip
- case "estargz":
- i.layerCompression = compression.EStargz
- esgz = true
- case "zstd":
- i.layerCompression = compression.Zstd
- case "uncompressed":
- i.layerCompression = compression.Uncompressed
- default:
- return nil, errors.Errorf("unsupported layer compression type: %v", v)
- }
- case keyForceCompression:
- if v == "" {
- i.forceCompression = true
- continue
- }
- b, err := strconv.ParseBool(v)
- if err != nil {
- return nil, errors.Wrapf(err, "non-bool value %v specified for %s", v, k)
- }
- i.forceCompression = b
- case keyCompressionLevel:
- ii, err := strconv.ParseInt(v, 10, 64)
- if err != nil {
- return nil, errors.Wrapf(err, "non-int value %s specified for %s", v, k)
- }
- v := int(ii)
- i.compressionLevel = &v
- case ociTypes:
- ot = new(bool)
- if v == "" {
- *ot = true
- continue
- }
- b, err := strconv.ParseBool(v)
- if err != nil {
- return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
- }
- *ot = b
- case keyBuildInfo:
- if v == "" {
- i.buildInfo = true
- continue
- }
- b, err := strconv.ParseBool(v)
- if err != nil {
- return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
- }
- i.buildInfo = b
- case keyBuildInfoAttrs:
+ case keyTar:
if v == "" {
- i.buildInfoAttrs = false
+ i.tar = true
continue
}
b, err := strconv.ParseBool(v)
if err != nil {
return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
}
- i.buildInfoAttrs = b
- case preferNondistLayersKey:
- b, err := strconv.ParseBool(v)
- if err != nil {
- return nil, errors.Wrapf(err, "non-bool value specified for %s", k)
- }
- i.preferNonDist = b
+ i.tar = b
default:
if i.meta == nil {
i.meta = make(map[string][]byte)
@@ -150,59 +96,27 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
i.meta[k] = []byte(v)
}
}
- if ot == nil {
- i.ociTypes = e.opt.Variant == VariantOCI
- } else {
- i.ociTypes = *ot
- }
- if esgz && !i.ociTypes {
- logrus.Warn("forcibly turning on oci-mediatype mode for estargz")
- i.ociTypes = true
- }
return i, nil
}
type imageExporterInstance struct {
*imageExporter
- meta map[string][]byte
- name string
- ociTypes bool
- layerCompression compression.Type
- forceCompression bool
- compressionLevel *int
- buildInfo bool
- buildInfoAttrs bool
- preferNonDist bool
+ opts containerimage.ImageCommitOpts
+ tar bool
+ meta map[string][]byte
}
func (e *imageExporterInstance) Name() string {
- return "exporting to oci image format"
+ return fmt.Sprintf("exporting to %s image format", e.opt.Variant)
}
-func (e *imageExporterInstance) Config() exporter.Config {
- return exporter.Config{
- Compression: e.compression(),
- }
+func (e *imageExporterInstance) Config() *exporter.Config {
+ return exporter.NewConfigWithCompression(e.opts.RefCfg.Compression)
}
-func (e *imageExporterInstance) compression() compression.Config {
- c := compression.New(e.layerCompression).SetForce(e.forceCompression)
- if e.compressionLevel != nil {
- c = c.SetLevel(*e.compressionLevel)
- }
- return c
-}
-
-func (e *imageExporterInstance) refCfg() cacheconfig.RefConfig {
- return cacheconfig.RefConfig{
- Compression: e.compression(),
- PreferNonDistributable: e.preferNonDist,
- }
-}
-
-func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, sessionID string) (map[string]string, error) {
+func (e *imageExporterInstance) Export(ctx context.Context, src *exporter.Source, sessionID string) (_ map[string]string, descref exporter.DescriptorReference, err error) {
if e.opt.Variant == VariantDocker && len(src.Refs) > 0 {
- return nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
+ return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
}
if src.Metadata == nil {
@@ -212,24 +126,43 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
src.Metadata[k] = v
}
+ opts := e.opts
+ as, _, err := containerimage.ParseAnnotations(src.Metadata)
+ if err != nil {
+ return nil, nil, err
+ }
+ opts.Annotations = opts.Annotations.Merge(as)
+
ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- defer done(context.TODO())
+ defer func() {
+ if descref == nil {
+ done(context.TODO())
+ }
+ }()
- desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.refCfg(), e.buildInfo, e.buildInfoAttrs, sessionID)
+ desc, err := e.opt.ImageWriter.Commit(ctx, src, sessionID, &opts)
if err != nil {
- return nil, err
+ return nil, nil, err
}
defer func() {
- e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest)
+ if err == nil {
+ descref = containerimage.NewDescriptorReference(*desc, done)
+ }
}()
if desc.Annotations == nil {
desc.Annotations = map[string]string{}
}
- desc.Annotations[ocispecs.AnnotationCreated] = time.Now().UTC().Format(time.RFC3339)
+ if _, ok := desc.Annotations[ocispecs.AnnotationCreated]; !ok {
+ tm := time.Now()
+ if opts.Epoch != nil {
+ tm = *opts.Epoch
+ }
+ desc.Annotations[ocispecs.AnnotationCreated] = tm.UTC().Format(time.RFC3339)
+ }
resp := make(map[string]string)
@@ -241,17 +174,17 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
dtdesc, err := json.Marshal(desc)
if err != nil {
- return nil, err
+ return nil, nil, err
}
resp[exptypes.ExporterImageDescriptorKey] = base64.StdEncoding.EncodeToString(dtdesc)
- if n, ok := src.Metadata["image.name"]; e.name == "*" && ok {
- e.name = string(n)
+ if n, ok := src.Metadata["image.name"]; e.opts.ImageName == "*" && ok {
+ e.opts.ImageName = string(n)
}
- names, err := normalizedNames(e.name)
+ names, err := normalizedNames(e.opts.ImageName)
if err != nil {
- return nil, err
+ return nil, nil, err
}
if len(names) != 0 {
@@ -264,7 +197,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
expOpts = append(expOpts, archiveexporter.WithAllPlatforms(), archiveexporter.WithSkipDockerManifest())
case VariantDocker:
default:
- return nil, errors.Errorf("invalid variant %q", e.opt.Variant)
+ return nil, nil, errors.Errorf("invalid variant %q", e.opt.Variant)
}
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
@@ -272,26 +205,21 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false)
if err != nil {
- return nil, err
- }
-
- w, err := filesync.CopyFileWriter(ctx, resp, caller)
- if err != nil {
- return nil, err
+ return nil, nil, err
}
mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore())
if src.Ref != nil {
- remotes, err := src.Ref.GetRemotes(ctx, false, e.refCfg(), false, session.NewGroup(sessionID))
+ remotes, err := src.Ref.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
if err != nil {
- return nil, err
+ return nil, nil, err
}
remote := remotes[0]
// unlazy before tar export as the tar writer does not handle
// layer blobs in parallel (whereas unlazy does)
if unlazier, ok := remote.Provider.(cache.Unlazier); ok {
if err := unlazier.Unlazy(ctx); err != nil {
- return nil, err
+ return nil, nil, err
}
}
for _, desc := range remote.Descriptors {
@@ -300,14 +228,14 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
}
if len(src.Refs) > 0 {
for _, r := range src.Refs {
- remotes, err := r.GetRemotes(ctx, false, e.refCfg(), false, session.NewGroup(sessionID))
+ remotes, err := r.GetRemotes(ctx, false, e.opts.RefCfg, false, session.NewGroup(sessionID))
if err != nil {
- return nil, err
+ return nil, nil, err
}
remote := remotes[0]
if unlazier, ok := remote.Provider.(cache.Unlazier); ok {
if err := unlazier.Unlazy(ctx); err != nil {
- return nil, err
+ return nil, nil, err
}
}
for _, desc := range remote.Descriptors {
@@ -316,36 +244,41 @@ func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source,
}
}
- report := oneOffProgress(ctx, "sending tarball")
- if err := archiveexporter.Export(ctx, mprovider, w, expOpts...); err != nil {
- w.Close()
+ if e.tar {
+ w, err := filesync.CopyFileWriter(ctx, resp, caller)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ report := progress.OneOff(ctx, "sending tarball")
+ if err := archiveexporter.Export(ctx, mprovider, w, expOpts...); err != nil {
+ w.Close()
+ if grpcerrors.Code(err) == codes.AlreadyExists {
+ return resp, nil, report(nil)
+ }
+ return nil, nil, report(err)
+ }
+ err = w.Close()
if grpcerrors.Code(err) == codes.AlreadyExists {
- return resp, report(nil)
+ return resp, nil, report(nil)
+ }
+ if err != nil {
+ return nil, nil, report(err)
+ }
+ report(nil)
+ } else {
+ ctx = remotes.WithMediaTypeKeyPrefix(ctx, intoto.PayloadType, "intoto")
+ store := sessioncontent.NewCallerStore(caller, "export")
+ if err != nil {
+ return nil, nil, err
+ }
+ err := contentutil.CopyChain(ctx, store, mprovider, *desc)
+ if err != nil {
+ return nil, nil, err
}
- return nil, report(err)
- }
- err = w.Close()
- if grpcerrors.Code(err) == codes.AlreadyExists {
- return resp, report(nil)
}
- return resp, report(err)
-}
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
- pw, _, _ := progress.NewFromContext(ctx)
- now := time.Now()
- st := progress.Status{
- Started: &now,
- }
- pw.Write(id, st)
- return func(err error) error {
- // TODO: set error on status
- now := time.Now()
- st.Completed = &now
- pw.Write(id, st)
- pw.Close()
- return err
- }
+ return resp, nil, nil
}
func normalizedNames(name string) ([]string, error) {
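
The OCI/Docker exporter gains a "tar" attribute (parsed above); with tar=false the image is copied through the session content store instead of being streamed as a tarball. A hedged client-side sketch, assuming the existing client.ExportEntry API and that the client lays the result out as an OCI layout when given an output directory; the path is illustrative.

package example

import "github.com/moby/buildkit/client"

// ociLayoutExport requests an OCI export without the tar stream; "tar" maps
// to the keyTar option handled by the exporter above.
func ociLayoutExport() client.ExportEntry {
	return client.ExportEntry{
		Type:      client.ExporterOCI,
		OutputDir: "./build-output", // illustrative destination
		Attrs: map[string]string{
			"tar": "false",
		},
	}
}
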
diff --git a/exporter/tar/export.go b/exporter/tar/export.go
index 0febefd0b023..4d136c89c1ca 100644
--- a/exporter/tar/export.go
+++ b/exporter/tar/export.go
@@ -2,18 +2,18 @@ package local
import (
"context"
- "io/ioutil"
"os"
"strconv"
"strings"
"time"
- "github.com/docker/docker/pkg/idtools"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/exporter"
+ "github.com/moby/buildkit/exporter/containerimage/exptypes"
+ "github.com/moby/buildkit/exporter/local"
+ "github.com/moby/buildkit/exporter/util/epoch"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync"
- "github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/progress"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
@@ -21,6 +21,8 @@ import (
)
const (
+ attestationPrefixKey = "attestation-prefix"
+
// preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was
// already found to use a non-distributable media type.
// When this option is not set, the exporter will change the media type of the layer to a distributable one.
@@ -44,13 +46,23 @@ func New(opt Opt) (exporter.Exporter, error) {
func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) {
li := &localExporterInstance{localExporter: e}
- v, ok := opt[preferNondistLayersKey]
- if ok {
- b, err := strconv.ParseBool(v)
- if err != nil {
- return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v)
+ tm, opt, err := epoch.ParseExporterAttrs(opt)
+ if err != nil {
+ return nil, err
+ }
+ li.opts.Epoch = tm
+
+ for k, v := range opt {
+ switch k {
+ case preferNondistLayersKey:
+ b, err := strconv.ParseBool(v)
+ if err != nil {
+ return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v)
+ }
+ li.preferNonDist = b
+ case attestationPrefixKey:
+ li.opts.AttestationPrefix = v
}
- li.preferNonDist = b
}
return li, nil
@@ -58,19 +70,20 @@ func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exp
type localExporterInstance struct {
*localExporter
+ opts local.CreateFSOpts
preferNonDist bool
}
func (e *localExporterInstance) Name() string {
- return "exporting to client"
+ return "exporting to client tarball"
}
-func (e *localExporterInstance) Config() exporter.Config {
- return exporter.Config{}
+func (e *localExporterInstance) Config() *exporter.Config {
+ return exporter.NewConfig()
}
-func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) {
- var defers []func()
+func (e *localExporterInstance) Export(ctx context.Context, inp *exporter.Source, sessionID string) (map[string]string, exporter.DescriptorReference, error) {
+ var defers []func() error
defer func() {
for i := len(defers) - 1; i >= 0; i-- {
@@ -78,80 +91,79 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source,
}
}()
- getDir := func(ctx context.Context, k string, ref cache.ImmutableRef) (*fsutil.Dir, error) {
- var src string
- var err error
- var idmap *idtools.IdentityMapping
- if ref == nil {
- src, err = ioutil.TempDir("", "buildkit")
- if err != nil {
- return nil, err
- }
- defers = append(defers, func() { os.RemoveAll(src) })
- } else {
- mount, err := ref.Mount(ctx, true, session.NewGroup(sessionID))
- if err != nil {
- return nil, err
- }
-
- lm := snapshot.LocalMounter(mount)
-
- src, err = lm.Mount()
- if err != nil {
- return nil, err
- }
+ if e.opts.Epoch == nil {
+ if tm, ok, err := epoch.ParseSource(inp); err != nil {
+ return nil, nil, err
+ } else if ok {
+ e.opts.Epoch = tm
+ }
+ }
- idmap = mount.IdentityMapping()
+ now := time.Now().Truncate(time.Second)
- defers = append(defers, func() { lm.Unmount() })
+ getDir := func(ctx context.Context, k string, ref cache.ImmutableRef, attestations []exporter.Attestation) (*fsutil.Dir, error) {
+ outputFS, cleanup, err := local.CreateFS(ctx, sessionID, k, ref, attestations, now, e.opts)
+ if err != nil {
+ return nil, err
+ }
+ if cleanup != nil {
+ defers = append(defers, cleanup)
}
- walkOpt := &fsutil.WalkOpt{}
-
- if idmap != nil {
- walkOpt.Map = func(p string, st *fstypes.Stat) bool {
- uid, gid, err := idmap.ToContainer(idtools.Identity{
- UID: int(st.Uid),
- GID: int(st.Gid),
- })
- if err != nil {
- return false
- }
- st.Uid = uint32(uid)
- st.Gid = uint32(gid)
- return true
- }
+ st := fstypes.Stat{
+ Mode: uint32(os.ModeDir | 0755),
+ Path: strings.Replace(k, "/", "_", -1),
+ }
+ if e.opts.Epoch != nil {
+ st.ModTime = e.opts.Epoch.UnixNano()
}
return &fsutil.Dir{
- FS: fsutil.NewFS(src, walkOpt),
- Stat: fstypes.Stat{
- Mode: uint32(os.ModeDir | 0755),
- Path: strings.Replace(k, "/", "_", -1),
- },
+ FS: outputFS,
+ Stat: st,
}, nil
}
+ isMap := len(inp.Refs) > 0
+ if _, ok := inp.Metadata[exptypes.ExporterPlatformsKey]; isMap && !ok {
+ return nil, nil, errors.Errorf("unable to export multiple refs, missing platforms mapping")
+ }
+ p, err := exptypes.ParsePlatforms(inp.Metadata)
+ if err != nil {
+ return nil, nil, err
+ }
+ if !isMap && len(p.Platforms) > 1 {
+ return nil, nil, errors.Errorf("unable to export multiple platforms without map")
+ }
+
var fs fsutil.FS
- if len(inp.Refs) > 0 {
- dirs := make([]fsutil.Dir, 0, len(inp.Refs))
- for k, ref := range inp.Refs {
- d, err := getDir(ctx, k, ref)
+ if len(p.Platforms) > 0 {
+ dirs := make([]fsutil.Dir, 0, len(p.Platforms))
+ for _, p := range p.Platforms {
+ r, ok := inp.FindRef(p.ID)
+ if !ok {
+ return nil, nil, errors.Errorf("failed to find ref for ID %s", p.ID)
+ }
+ d, err := getDir(ctx, p.ID, r, inp.Attestations[p.ID])
if err != nil {
- return nil, err
+ return nil, nil, err
}
dirs = append(dirs, *d)
}
- var err error
- fs, err = fsutil.SubDirFS(dirs)
- if err != nil {
- return nil, err
+ if isMap {
+ var err error
+ fs, err = fsutil.SubDirFS(dirs)
+ if err != nil {
+ return nil, nil, err
+ }
+ } else {
+ fs = dirs[0].FS
}
} else {
- d, err := getDir(ctx, "", inp.Ref)
+ d, err := getDir(ctx, "", inp.Ref, nil)
if err != nil {
- return nil, err
+ return nil, nil, err
}
fs = d.FS
}
@@ -161,34 +173,17 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source,
caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false)
if err != nil {
- return nil, err
+ return nil, nil, err
}
w, err := filesync.CopyFileWriter(ctx, nil, caller)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- report := oneOffProgress(ctx, "sending tarball")
+ report := progress.OneOff(ctx, "sending tarball")
if err := fsutil.WriteTar(ctx, fs, w); err != nil {
w.Close()
- return nil, report(err)
- }
- return nil, report(w.Close())
-}
-
-func oneOffProgress(ctx context.Context, id string) func(err error) error {
- pw, _, _ := progress.NewFromContext(ctx)
- now := time.Now()
- st := progress.Status{
- Started: &now,
- }
- pw.Write(id, st)
- return func(err error) error {
- // TODO: set error on status
- now := time.Now()
- st.Completed = &now
- pw.Write(id, st)
- pw.Close()
- return err
+ return nil, nil, report(err)
}
+ return nil, nil, report(w.Close())
}
diff --git a/exporter/util/epoch/parse.go b/exporter/util/epoch/parse.go
new file mode 100644
index 000000000000..63f806e1b76a
--- /dev/null
+++ b/exporter/util/epoch/parse.go
@@ -0,0 +1,65 @@
+package epoch
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/moby/buildkit/exporter"
+ "github.com/moby/buildkit/exporter/containerimage/exptypes"
+ "github.com/pkg/errors"
+)
+
+const (
+ frontendSourceDateEpochArg = "build-arg:SOURCE_DATE_EPOCH"
+
+ KeySourceDateEpoch = "source-date-epoch"
+)
+
+func ParseBuildArgs(opt map[string]string) (string, bool) {
+ v, ok := opt[frontendSourceDateEpochArg]
+ return v, ok
+}
+
+func ParseExporterAttrs(opt map[string]string) (*time.Time, map[string]string, error) {
+ rest := make(map[string]string, len(opt))
+
+ var tm *time.Time
+
+ for k, v := range opt {
+ switch k {
+ case KeySourceDateEpoch:
+ var err error
+ tm, err = parseTime(k, v)
+ if err != nil {
+ return nil, nil, err
+ }
+ default:
+ rest[k] = v
+ }
+ }
+
+ return tm, rest, nil
+}
+
+func ParseSource(inp *exporter.Source) (*time.Time, bool, error) {
+ if v, ok := inp.Metadata[exptypes.ExporterEpochKey]; ok {
+ epoch, err := parseTime("", string(v))
+ if err != nil {
+ return nil, false, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH from frontend: %q", v)
+ }
+ return epoch, true, nil
+ }
+ return nil, false, nil
+}
+
+func parseTime(key, value string) (*time.Time, error) {
+ if value == "" {
+ return nil, nil
+ }
+ sde, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid %s: %s", key, err)
+ }
+ tm := time.Unix(sde, 0).UTC()
+ return &tm, nil
+}
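
A minimal sketch of the resolution order the exporters in this diff follow: an explicit source-date-epoch exporter attribute wins, otherwise the epoch the frontend recorded in the source metadata is used. Only the functions defined in this new file are assumed; resolveEpoch is an illustrative helper.

package example

import (
	"time"

	"github.com/moby/buildkit/exporter"
	"github.com/moby/buildkit/exporter/util/epoch"
)

// resolveEpoch strips source-date-epoch from the exporter attributes and,
// when absent, falls back to the frontend-provided epoch in src.Metadata.
func resolveEpoch(attrs map[string]string, src *exporter.Source) (*time.Time, map[string]string, error) {
	tm, rest, err := epoch.ParseExporterAttrs(attrs)
	if err != nil {
		return nil, nil, err
	}
	if tm == nil {
		fromSrc, ok, err := epoch.ParseSource(src)
		if err != nil {
			return nil, nil, err
		}
		if ok {
			tm = fromSrc
		}
	}
	return tm, rest, nil
}
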
diff --git a/frontend/attestations/parse.go b/frontend/attestations/parse.go
new file mode 100644
index 000000000000..00de649fdefe
--- /dev/null
+++ b/frontend/attestations/parse.go
@@ -0,0 +1,81 @@
+package attestations
+
+import (
+ "encoding/csv"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+const (
+ KeyTypeSbom = "sbom"
+ KeyTypeProvenance = "provenance"
+)
+
+const (
+ defaultSBOMGenerator = "docker/buildkit-syft-scanner:stable-1"
+)
+
+func Filter(v map[string]string) map[string]string {
+ attests := make(map[string]string)
+ for k, v := range v {
+ if strings.HasPrefix(k, "attest:") {
+ attests[k] = v
+ continue
+ }
+ if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
+ attests[k] = v
+ continue
+ }
+ }
+ return attests
+}
+
+func Validate(values map[string]map[string]string) (map[string]map[string]string, error) {
+ for k := range values {
+ if k != KeyTypeSbom && k != KeyTypeProvenance {
+ return nil, errors.Errorf("unknown attestation type %q", k)
+ }
+ }
+ return values, nil
+}
+
+func Parse(values map[string]string) (map[string]map[string]string, error) {
+ attests := make(map[string]string)
+ for k, v := range values {
+ if strings.HasPrefix(k, "attest:") {
+ attests[strings.ToLower(strings.TrimPrefix(k, "attest:"))] = v
+ continue
+ }
+ if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
+ attests[strings.ToLower(strings.TrimPrefix(k, "build-arg:BUILDKIT_ATTEST_"))] = v
+ continue
+ }
+ }
+
+ out := make(map[string]map[string]string)
+ for k, v := range attests {
+ attrs := make(map[string]string)
+ out[k] = attrs
+ if k == KeyTypeSbom {
+ attrs["generator"] = defaultSBOMGenerator
+ }
+ if v == "" {
+ continue
+ }
+ csvReader := csv.NewReader(strings.NewReader(v))
+ fields, err := csvReader.Read()
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse %s", k)
+ }
+ for _, field := range fields {
+ parts := strings.SplitN(field, "=", 2)
+ if len(parts) != 2 {
+ parts = append(parts, "")
+ }
+ attrs[parts[0]] = parts[1]
+ }
+ }
+
+ return Validate(out)
+}
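
Parse accepts both the attest:<type> option form and the build-arg:BUILDKIT_ATTEST_<TYPE> form, expands each value as CSV key=value pairs, and fills in the default SBOM generator when none is given. A small example of the expected mapping; the scanner ref is illustrative.

package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/attestations"
)

func main() {
	// Options as the dockerfile frontend receives them.
	opts := map[string]string{
		"attest:sbom":       "generator=example/scanner:latest",
		"attest:provenance": "mode=max",
	}
	attests, err := attestations.Parse(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(attests["sbom"]["generator"])  // example/scanner:latest
	fmt.Println(attests["provenance"]["mode"]) // max
}
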
diff --git a/frontend/attestations/sbom/sbom.go b/frontend/attestations/sbom/sbom.go
new file mode 100644
index 000000000000..113797b2139c
--- /dev/null
+++ b/frontend/attestations/sbom/sbom.go
@@ -0,0 +1,112 @@
+package sbom
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
+ "github.com/moby/buildkit/client/llb"
+ gatewaypb "github.com/moby/buildkit/frontend/gateway/pb"
+ "github.com/moby/buildkit/solver/result"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+const (
+ CoreSBOMName = "sbom"
+ ExtraSBOMPrefix = CoreSBOMName + "-"
+
+ srcDir = "/run/src/"
+ outDir = "/run/out/"
+)
+
+// Scanner is a function type for scanning the contents of a state and
+// returning a new attestation and state representing the scan results.
+//
+// A scanner is designed to scan a single state; however, additional states can
+// also be attached to provide additional information, such as scans of
+// build-contexts or multi-stage builds. Handling these separately allows the
+// scanner to optionally ignore these or to mark them as such in the
+// attestation.
+type Scanner func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error)
+
+func CreateSBOMScanner(ctx context.Context, resolver llb.ImageMetaResolver, scanner string) (Scanner, error) {
+ if scanner == "" {
+ return nil, nil
+ }
+
+ _, dt, err := resolver.ResolveImageConfig(ctx, scanner, llb.ResolveImageConfigOpt{})
+ if err != nil {
+ return nil, err
+ }
+
+ var cfg ocispecs.Image
+ if err := json.Unmarshal(dt, &cfg); err != nil {
+ return nil, err
+ }
+
+ var args []string
+ args = append(args, cfg.Config.Entrypoint...)
+ args = append(args, cfg.Config.Cmd...)
+ if len(args) == 0 {
+ return nil, errors.Errorf("scanner %s does not have cmd", scanner)
+ }
+
+ return func(ctx context.Context, name string, ref llb.State, extras map[string]llb.State, opts ...llb.ConstraintsOpt) (result.Attestation[llb.State], error) {
+ var env []string
+ env = append(env, cfg.Config.Env...)
+ env = append(env, "BUILDKIT_SCAN_DESTINATION="+outDir)
+ env = append(env, "BUILDKIT_SCAN_SOURCE="+path.Join(srcDir, "core", CoreSBOMName))
+ if len(extras) > 0 {
+ env = append(env, "BUILDKIT_SCAN_SOURCE_EXTRAS="+path.Join(srcDir, "extras/"))
+ }
+
+ runOpts := []llb.RunOption{
+ llb.WithCustomName(fmt.Sprintf("[%s] generating sbom using %s", name, scanner)),
+ }
+ for _, opt := range opts {
+ runOpts = append(runOpts, opt)
+ }
+ runOpts = append(runOpts, llb.Dir(cfg.Config.WorkingDir))
+ runOpts = append(runOpts, llb.Args(args))
+ for _, e := range env {
+ k, v, _ := strings.Cut(e, "=")
+ runOpts = append(runOpts, llb.AddEnv(k, v))
+ }
+
+ runscan := llb.Image(scanner).Run(runOpts...)
+ runscan.AddMount("/tmp", llb.Scratch(), llb.Tmpfs())
+
+ runscan.AddMount(path.Join(srcDir, "core", CoreSBOMName), ref, llb.Readonly)
+ for k, extra := range extras {
+ runscan.AddMount(path.Join(srcDir, "extras", ExtraSBOMPrefix+k), extra, llb.Readonly)
+ }
+
+ stsbom := runscan.AddMount(outDir, llb.Scratch())
+ return result.Attestation[llb.State]{
+ Kind: gatewaypb.AttestationKindBundle,
+ Ref: stsbom,
+ Metadata: map[string][]byte{
+ result.AttestationReasonKey: []byte(result.AttestationReasonSBOM),
+ result.AttestationSBOMCore: []byte(CoreSBOMName),
+ },
+ InToto: result.InTotoAttestation{
+ PredicateType: intoto.PredicateSPDX,
+ },
+ }, nil
+ }, nil
+}
+
+func HasSBOM[T any](res *result.Result[T]) bool {
+ for _, as := range res.Attestations {
+ for _, a := range as {
+ if a.InToto.PredicateType == intoto.PredicateSPDX {
+ return true
+ }
+ }
+ }
+ return false
+}
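
A hedged sketch of how a frontend wires this in (the dockerfile frontend below does the equivalent): resolve the generator image once, then run the returned Scanner against the final state. Only the signatures defined above are assumed; the stage name is illustrative and no extra states are attached.

package example

import (
	"context"

	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/frontend/attestations/sbom"
	"github.com/moby/buildkit/solver/result"
)

// scanState runs the default SBOM generator image against st and returns the
// bundle attestation pointing at the scanner's output mount.
func scanState(ctx context.Context, resolver llb.ImageMetaResolver, st llb.State) (*result.Attestation[llb.State], error) {
	scanner, err := sbom.CreateSBOMScanner(ctx, resolver, "docker/buildkit-syft-scanner:stable-1")
	if err != nil || scanner == nil {
		return nil, err
	}
	att, err := scanner(ctx, "target", st, nil)
	if err != nil {
		return nil, err
	}
	return &att, nil
}
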
diff --git a/frontend/dockerfile/builder/build.go b/frontend/dockerfile/builder/build.go
index e0a1806901bc..aafd9c9a73f3 100644
--- a/frontend/dockerfile/builder/build.go
+++ b/frontend/dockerfile/builder/build.go
@@ -12,6 +12,7 @@ import (
"regexp"
"strconv"
"strings"
+ "time"
"github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference"
@@ -19,15 +20,20 @@ import (
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
+ "github.com/moby/buildkit/frontend"
+ "github.com/moby/buildkit/frontend/attestations"
+ "github.com/moby/buildkit/frontend/attestations/sbom"
"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
"github.com/moby/buildkit/frontend/dockerfile/dockerignore"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/gateway/client"
gwpb "github.com/moby/buildkit/frontend/gateway/pb"
+ "github.com/moby/buildkit/frontend/subrequests/outline"
+ "github.com/moby/buildkit/frontend/subrequests/targets"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb"
- "github.com/moby/buildkit/util/apicaps"
- binfotypes "github.com/moby/buildkit/util/buildinfo/types"
+ "github.com/moby/buildkit/solver/result"
+ "github.com/moby/buildkit/util/gitutil"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
@@ -39,45 +45,59 @@ const (
defaultDockerfileName = "Dockerfile"
dockerignoreFilename = ".dockerignore"
- buildArgPrefix = "build-arg:"
- labelPrefix = "label:"
-
- keyTarget = "target"
- keyFilename = "filename"
- keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports
- keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry
- keyCgroupParent = "cgroup-parent"
- keyContextSubDir = "contextsubdir"
- keyForceNetwork = "force-network-mode"
- keyGlobalAddHosts = "add-hosts"
- keyHostname = "hostname"
- keyImageResolveMode = "image-resolve-mode"
- keyMultiPlatform = "multi-platform"
- keyNameContext = "contextkey"
- keyNameDockerfile = "dockerfilekey"
- keyNoCache = "no-cache"
- keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented
- keyShmSize = "shm-size"
- keyTargetPlatform = "platform"
- keyUlimit = "ulimit"
+ buildArgPrefix = "build-arg:"
+ labelPrefix = "label:"
+ contextPrefix = "context:"
+ inputMetadataPrefix = "input-metadata:"
+
+ keyTarget = "target"
+ keyFilename = "filename"
+ keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports
+ keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry
+ keyCgroupParent = "cgroup-parent"
+ keyContextSubDir = "contextsubdir"
+ keyForceNetwork = "force-network-mode"
+ keyGlobalAddHosts = "add-hosts"
+ keyHostname = "hostname"
+ keyImageResolveMode = "image-resolve-mode"
+ keyMultiPlatform = "multi-platform"
+ keyNameContext = "contextkey"
+ keyNameDockerfile = "dockerfilekey"
+ keyNoCache = "no-cache"
+ keyShmSize = "shm-size"
+ keyTargetPlatform = "platform"
+ keyUlimit = "ulimit"
+ keyRequestID = "requestid"
// Don't forget to update frontend documentation if you add
- // a new build-arg: frontend/dockerfile/docs/syntax.md
+ // a new build-arg: frontend/dockerfile/docs/reference.md
keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS"
keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR"
keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME"
keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM"
keySyntaxArg = "build-arg:BUILDKIT_SYNTAX"
+ keySourceDateEpoch = "build-arg:SOURCE_DATE_EPOCH"
)
var httpPrefix = regexp.MustCompile(`^https?://`)
-var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
-func Build(ctx context.Context, c client.Client) (*client.Result, error) {
+func Build(ctx context.Context, c client.Client) (_ *client.Result, err error) {
opts := c.BuildOpts().Opts
caps := c.BuildOpts().LLBCaps
gwcaps := c.BuildOpts().Caps
+ if err := caps.Supports(pb.CapFileBase); err != nil {
+ return nil, errors.Wrap(err, "needs BuildKit 0.5 or later")
+ }
+ if opts["override-copy-image"] != "" {
+ return nil, errors.New("support for \"override-copy-image\" was removed in BuildKit 0.11")
+ }
+ if v, ok := opts["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok {
+ if b, err := strconv.ParseBool(v); err == nil && b {
+ return nil, errors.New("support for \"build-arg:BUILDKIT_DISABLE_FILEOP\" was removed in BuildKit 0.11")
+ }
+ }
+
allowForward, capsError := validateCaps(opts["frontend.caps"])
if !allowForward && capsError != nil {
return nil, capsError
@@ -168,11 +188,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
llb.Differ(llb.DiffNone, false),
)
- fileop := useFileOp(opts, &caps)
-
var buildContext *llb.State
isNotLocalContext := false
- if st, ok := detectGitContext(opts[localNameContext], opts[keyContextKeepGitDirArg]); ok {
+ keepGit := false
+ if v, err := strconv.ParseBool(opts[keyContextKeepGitDirArg]); err == nil {
+ keepGit = v
+ }
+ if st, ok := detectGitContext(opts[localNameContext], keepGit); ok {
if !forceLocalDockerfile {
src = *st
}
@@ -205,28 +227,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
return nil, errors.Wrapf(err, "failed to read downloaded context")
}
if isArchive(dt) {
- if fileop {
- bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{
- AttemptUnpack: true,
- }))
- if !forceLocalDockerfile {
- src = bc
- }
- buildContext = &bc
- } else {
- copyImage := opts[keyOverrideCopyImage]
- if copyImage == "" {
- copyImage = dockerfile2llb.DefaultCopyImage
- }
- unpack := llb.Image(copyImage, dockerfile2llb.WithInternalName("helper image for file operations")).
- Run(llb.Shlex("copy --unpack /src/context /out/"), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("extracting build context"))
- unpack.AddMount("/src", httpContext, llb.Readonly)
- bc := unpack.AddMount("/out", llb.Scratch())
- if !forceLocalDockerfile {
- src = bc
- }
- buildContext = &bc
+ bc := llb.Scratch().File(llb.Copy(httpContext, "/context", "/", &llb.CopyInfo{
+ AttemptUnpack: true,
+ }))
+ if !forceLocalDockerfile {
+ src = bc
}
+ buildContext = &bc
} else {
filename = "context"
if !forceLocalDockerfile {
@@ -257,7 +264,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
if buildContext != nil {
if sub, ok := opts[keyContextSubDir]; ok {
- buildContext = scopeToSubDir(buildContext, fileop, sub)
+ buildContext = scopeToSubDir(buildContext, sub)
}
}
@@ -380,7 +387,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
return nil, errors.Wrapf(err, "failed with %s = %s", keySyntaxArg, cmdline)
}
return res, err
- } else if ref, cmdline, loc, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)); ok {
+ } else if ref, cmdline, loc, ok := parser.DetectSyntax(dtDockerfile); ok {
res, err := forwardGateway(ctx, c, ref, cmdline)
if err != nil && len(errdefs.Sources(err)) == 0 {
return nil, wrapSource(err, sourceMap, loc)
@@ -408,7 +415,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
return nil, errors.Errorf("invalid boolean value %s", v)
}
if !b && exportMap {
- return nil, errors.Errorf("returning multiple target plaforms is not allowed")
+ return nil, errors.Errorf("returning multiple target platforms is not allowed")
}
exportMap = b
}
@@ -422,55 +429,107 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
opts[keyHostname] = v
}
- eg, ctx = errgroup.WithContext(ctx)
+ epoch, err := parseSourceDateEpoch(opts[keySourceDateEpoch])
+ if err != nil {
+ return nil, err
+ }
+
+ target := opts[keyTarget]
+ convertOpt := dockerfile2llb.ConvertOpt{
+ Target: target,
+ MetaResolver: c,
+ BuildArgs: filter(opts, buildArgPrefix),
+ Labels: filter(opts, labelPrefix),
+ CacheIDNamespace: opts[keyCacheNSArg],
+ SessionID: c.BuildOpts().SessionID,
+ BuildContext: buildContext,
+ Excludes: excludes,
+ IgnoreCache: ignoreCache,
+ TargetPlatform: targetPlatforms[0],
+ BuildPlatforms: buildPlatforms,
+ ImageResolveMode: resolveMode,
+ PrefixPlatform: exportMap,
+ ExtraHosts: extraHosts,
+ ShmSize: shmSize,
+ Ulimit: ulimit,
+ CgroupParent: opts[keyCgroupParent],
+ ForceNetMode: defaultNetMode,
+ LLBCaps: &caps,
+ SourceMap: sourceMap,
+ Hostname: opts[keyHostname],
+ SourceDateEpoch: epoch,
+ Warn: func(msg, url string, detail [][]byte, location *parser.Range) {
+ c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url))
+ },
+ ContextByName: contextByNameFunc(c, c.BuildOpts().SessionID),
+ }
+
+ defer func() {
+ var el *parser.ErrorLocation
+ if errors.As(err, &el) {
+ err = wrapSource(err, sourceMap, el.Location)
+ }
+ }()
+
+ if req, ok := opts[keyRequestID]; ok {
+ switch req {
+ case outline.SubrequestsOutlineDefinition.Name:
+ o, err := dockerfile2llb.Dockefile2Outline(ctx, dtDockerfile, convertOpt)
+ if err != nil {
+ return nil, err
+ }
+ return o.ToResult()
+ case targets.SubrequestsTargetsDefinition.Name:
+ targets, err := dockerfile2llb.ListTargets(ctx, dtDockerfile)
+ if err != nil {
+ return nil, err
+ }
+ return targets.ToResult()
+ default:
+ return nil, errdefs.NewUnsupportedSubrequestError(req)
+ }
+ }
+
+ var scanner sbom.Scanner
+ attests, err := attestations.Parse(opts)
+ if err != nil {
+ return nil, err
+ }
+ if attrs, ok := attests[attestations.KeyTypeSbom]; ok {
+ src, ok := attrs["generator"]
+ if !ok {
+ return nil, errors.Errorf("sbom scanner cannot be empty")
+ }
+ ref, err := reference.ParseNormalizedNamed(src)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to parse sbom scanner %s", src)
+ }
+ ref = reference.TagNameOnly(ref)
+
+ scanner, err = sbom.CreateSBOMScanner(ctx, c, ref.String())
+ if err != nil {
+ return nil, err
+ }
+ }
+ scanTargets := make([]*dockerfile2llb.SBOMTargets, len(targetPlatforms))
+
+ eg, ctx2 = errgroup.WithContext(ctx)
for i, tp := range targetPlatforms {
func(i int, tp *ocispecs.Platform) {
eg.Go(func() (err error) {
- defer func() {
- var el *parser.ErrorLocation
- if errors.As(err, &el) {
- err = wrapSource(err, sourceMap, el.Location)
- }
- }()
-
- st, img, bi, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{
- Target: opts[keyTarget],
- MetaResolver: c,
- BuildArgs: filter(opts, buildArgPrefix),
- Labels: filter(opts, labelPrefix),
- CacheIDNamespace: opts[keyCacheNSArg],
- SessionID: c.BuildOpts().SessionID,
- BuildContext: buildContext,
- Excludes: excludes,
- IgnoreCache: ignoreCache,
- TargetPlatform: tp,
- BuildPlatforms: buildPlatforms,
- ImageResolveMode: resolveMode,
- PrefixPlatform: exportMap,
- ExtraHosts: extraHosts,
- ShmSize: shmSize,
- Ulimit: ulimit,
- CgroupParent: opts[keyCgroupParent],
- ForceNetMode: defaultNetMode,
- OverrideCopyImage: opts[keyOverrideCopyImage],
- LLBCaps: &caps,
- SourceMap: sourceMap,
- Hostname: opts[keyHostname],
- Warn: func(msg, url string, detail [][]byte, location *parser.Range) {
- if i != 0 {
- return
- }
- c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url))
- },
- ContextByName: contextByNameFunc(c, tp),
- })
-
+ opt := convertOpt
+ opt.TargetPlatform = tp
+ if i != 0 {
+ opt.Warn = nil
+ }
+ opt.ContextByName = contextByNameFunc(c, c.BuildOpts().SessionID)
+ st, img, scanTarget, err := dockerfile2llb.Dockerfile2LLB(ctx2, dtDockerfile, opt)
if err != nil {
return err
}
- def, err := st.Marshal(ctx)
+ def, err := st.Marshal(ctx2)
if err != nil {
return errors.Wrapf(err, "failed to marshal LLB definition")
}
@@ -506,7 +565,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
}
}
- r, err := c.Solve(ctx, client.SolveRequest{
+ r, err := c.Solve(ctx2, client.SolveRequest{
Definition: def.ToPB(),
CacheImports: cacheImports,
})
@@ -519,30 +578,30 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
return err
}
- buildinfo, err := json.Marshal(bi)
- if err != nil {
- return errors.Wrapf(err, "failed to marshal build info")
+ p := platforms.DefaultSpec()
+ if tp != nil {
+ p = *tp
}
+ p = platforms.Normalize(p)
+ k := platforms.Format(p)
if !exportMap {
res.AddMeta(exptypes.ExporterImageConfigKey, config)
- res.AddMeta(exptypes.ExporterBuildInfo, buildinfo)
res.SetRef(ref)
- } else {
- p := platforms.DefaultSpec()
- if tp != nil {
- p = *tp
- }
- k := platforms.Format(p)
+ expPlatforms.Platforms[i] = exptypes.Platform{
+ ID: k,
+ Platform: p,
+ }
+ } else {
res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config)
- res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), buildinfo)
res.AddRef(k, ref)
expPlatforms.Platforms[i] = exptypes.Platform{
ID: k,
Platform: p,
}
}
+ scanTargets[i] = scanTarget
return nil
})
}(i, tp)
@@ -552,14 +611,45 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) {
return nil, err
}
- if exportMap {
- dt, err := json.Marshal(expPlatforms)
- if err != nil {
- return nil, err
+ if scanner != nil {
+ for i, p := range expPlatforms.Platforms {
+ target := scanTargets[i]
+
+ var opts []llb.ConstraintsOpt
+ if target.IgnoreCache {
+ opts = append(opts, llb.IgnoreCache)
+ }
+ att, err := scanner(ctx, p.ID, target.Core, target.Extras, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ attSolve, err := result.ConvertAttestation(&att, func(st llb.State) (client.Reference, error) {
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ r, err := c.Solve(ctx, frontend.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ return r.Ref, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ res.AddAttestation(p.ID, *attSolve)
}
- res.AddMeta(exptypes.ExporterPlatformsKey, dt)
}
+ dt, err := json.Marshal(expPlatforms)
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, dt)
+
return res, nil
}
@@ -606,40 +696,21 @@ func filter(opt map[string]string, key string) map[string]string {
return m
}
-func detectGitContext(ref, gitContext string) (*llb.State, bool) {
- found := false
- if httpPrefix.MatchString(ref) && gitURLPathWithFragmentSuffix.MatchString(ref) {
- found = true
- }
-
- keepGit := false
- if gitContext != "" {
- if v, err := strconv.ParseBool(gitContext); err == nil {
- keepGit = v
- }
- }
-
- for _, prefix := range []string{"git://", "github.com/", "git@"} {
- if strings.HasPrefix(ref, prefix) {
- found = true
- break
- }
- }
- if !found {
+func detectGitContext(ref string, keepGit bool) (*llb.State, bool) {
+ g, err := gitutil.ParseGitRef(ref)
+ if err != nil {
return nil, false
}
-
- parts := strings.SplitN(ref, "#", 2)
- branch := ""
- if len(parts) > 1 {
- branch = parts[1]
+ commit := g.Commit
+ if g.SubDir != "" {
+ commit += ":" + g.SubDir
}
gitOpts := []llb.GitOption{dockerfile2llb.WithInternalName("load git source " + ref)}
if keepGit {
gitOpts = append(gitOpts, llb.KeepGitDir())
}
- st := llb.Git(parts[0], branch, gitOpts...)
+ st := llb.Git(g.Remote, commit, gitOpts...)
return &st, true
}
@@ -765,27 +836,10 @@ func parseNetMode(v string) (pb.NetMode, error) {
}
}
-func useFileOp(args map[string]string, caps *apicaps.CapSet) bool {
- enabled := true
- if v, ok := args["build-arg:BUILDKIT_DISABLE_FILEOP"]; ok {
- if b, err := strconv.ParseBool(v); err == nil {
- enabled = !b
- }
- }
- return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil
-}
-
-func scopeToSubDir(c *llb.State, fileop bool, dir string) *llb.State {
- if fileop {
- bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{
- CopyDirContentsOnly: true,
- }))
- return &bc
- }
- unpack := llb.Image(dockerfile2llb.DefaultCopyImage, dockerfile2llb.WithInternalName("helper image for file operations")).
- Run(llb.Shlexf("copy %s/. /out/", path.Join("/src", dir)), llb.ReadonlyRootFS(), dockerfile2llb.WithInternalName("filtering build context"))
- unpack.AddMount("/src", *c, llb.Readonly)
- bc := unpack.AddMount("/out", llb.Scratch())
+func scopeToSubDir(c *llb.State, dir string) *llb.State {
+ bc := llb.Scratch().File(llb.Copy(*c, dir, "/", &llb.CopyInfo{
+ CopyDirContentsOnly: true,
+ }))
return &bc
}
@@ -812,11 +866,11 @@ func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) c
return opts
}
-func contextByNameFunc(c client.Client, p *ocispecs.Platform) func(context.Context, string) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) {
- return func(ctx context.Context, name string) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) {
+func contextByNameFunc(c client.Client, sessionID string) func(context.Context, string, string, *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) {
+ return func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, error) {
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
- return nil, nil, nil, errors.Wrapf(err, "invalid context name %s", name)
+ return nil, nil, errors.Wrapf(err, "invalid context name %s", name)
}
name = strings.TrimSuffix(reference.FamiliarString(named), ":latest")
@@ -825,77 +879,174 @@ func contextByNameFunc(c client.Client, p *ocispecs.Platform) func(context.Conte
p = &pp
}
if p != nil {
- name := name + "::" + platforms.Format(platforms.Normalize(*p))
- st, img, bi, err := contextByName(ctx, c, name, p)
+ pname := name + "::" + platforms.Format(platforms.Normalize(*p))
+ st, img, err := contextByName(ctx, c, sessionID, name, pname, p, resolveMode)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, err
}
if st != nil {
- return st, img, bi, nil
+ return st, img, nil
}
}
- return contextByName(ctx, c, name, p)
+ return contextByName(ctx, c, sessionID, name, name, p, resolveMode)
}
}
-func contextByName(ctx context.Context, c client.Client, name string, platform *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) {
+func contextByName(ctx context.Context, c client.Client, sessionID, name string, pname string, platform *ocispecs.Platform, resolveMode string) (*llb.State, *dockerfile2llb.Image, error) {
opts := c.BuildOpts().Opts
- v, ok := opts["context:"+name]
+ v, ok := opts[contextPrefix+pname]
if !ok {
- return nil, nil, nil, nil
+ return nil, nil, nil
}
vv := strings.SplitN(v, ":", 2)
if len(vv) != 2 {
- return nil, nil, nil, errors.Errorf("invalid context specifier %s for %s", v, name)
+ return nil, nil, errors.Errorf("invalid context specifier %s for %s", v, pname)
+ }
+ // allow git@ without protocol for SSH URLs for backwards compatibility
+ if strings.HasPrefix(vv[0], "git@") {
+ vv[0] = "git"
}
switch vv[0] {
case "docker-image":
ref := strings.TrimPrefix(vv[1], "//")
+ if ref == "scratch" {
+ st := llb.Scratch()
+ return &st, nil, nil
+ }
+
imgOpt := []llb.ImageOption{
- llb.WithCustomName("[context " + name + "] " + ref),
- llb.WithMetaResolver(c),
+ llb.WithCustomName("[context " + pname + "] " + ref),
}
if platform != nil {
imgOpt = append(imgOpt, llb.Platform(*platform))
}
+
+ named, err := reference.ParseNormalizedNamed(ref)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ named = reference.TagNameOnly(named)
+
+ _, data, err := c.ResolveImageConfig(ctx, named.String(), llb.ResolveImageConfigOpt{
+ Platform: platform,
+ ResolveMode: resolveMode,
+ LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, ref),
+ ResolverType: llb.ResolverTypeRegistry,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var img dockerfile2llb.Image
+ if err := json.Unmarshal(data, &img); err != nil {
+ return nil, nil, err
+ }
+ img.Created = nil
+
st := llb.Image(ref, imgOpt...)
- return &st, nil, nil, nil
+ st, err = st.WithImageConfig(data)
+ if err != nil {
+ return nil, nil, err
+ }
+ return &st, &img, nil
case "git":
- st, ok := detectGitContext(v, "1")
+ st, ok := detectGitContext(v, true)
if !ok {
- return nil, nil, nil, errors.Errorf("invalid git context %s", v)
+ return nil, nil, errors.Errorf("invalid git context %s", v)
}
- return st, nil, nil, nil
+ return st, nil, nil
case "http", "https":
- st, ok := detectGitContext(v, "1")
+ st, ok := detectGitContext(v, true)
if !ok {
- httpst := llb.HTTP(v, llb.WithCustomName("[context "+name+"] "+v))
+ httpst := llb.HTTP(v, llb.WithCustomName("[context "+pname+"] "+v))
st = &httpst
}
- return st, nil, nil, nil
+ return st, nil, nil
+ case "oci-layout":
+ refSpec := strings.TrimPrefix(vv[1], "//")
+ ref, err := reference.Parse(refSpec)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", refSpec)
+ }
+ named, ok := ref.(reference.Named)
+ if !ok {
+ return nil, nil, errors.Errorf("oci-layout reference %q has no name", ref.String())
+ }
+ dgstd, ok := named.(reference.Digested)
+ if !ok {
+ return nil, nil, errors.Errorf("oci-layout reference %q has no digest", named.String())
+ }
+
+ // for the dummy ref primarily used in log messages, we can use the
+ // original name, since the store key may not be significant
+ dummyRef, err := reference.ParseNormalizedNamed(name)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "could not parse oci-layout reference %q", name)
+ }
+ dummyRef, err = reference.WithDigest(dummyRef, dgstd.Digest())
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name)
+ }
+
+ _, data, err := c.ResolveImageConfig(ctx, dummyRef.String(), llb.ResolveImageConfigOpt{
+ Platform: platform,
+ ResolveMode: resolveMode,
+ LogName: fmt.Sprintf("[context %s] load metadata for %s", pname, dummyRef.String()),
+ ResolverType: llb.ResolverTypeOCILayout,
+ Store: llb.ResolveImageConfigOptStore{
+ SessionID: sessionID,
+ StoreID: named.Name(),
+ },
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var img dockerfile2llb.Image
+ if err := json.Unmarshal(data, &img); err != nil {
+ return nil, nil, errors.Wrap(err, "could not parse oci-layout image config")
+ }
+
+ ociOpt := []llb.OCILayoutOption{
+ llb.WithCustomName("[context " + pname + "] OCI load from client"),
+ llb.OCIStore(c.BuildOpts().SessionID, named.Name()),
+ }
+ if platform != nil {
+ ociOpt = append(ociOpt, llb.Platform(*platform))
+ }
+ st := llb.OCILayout(
+ dummyRef.String(),
+ ociOpt...,
+ )
+ st, err = st.WithImageConfig(data)
+ if err != nil {
+ return nil, nil, err
+ }
+ return &st, &img, nil
case "local":
st := llb.Local(vv[1],
llb.SessionID(c.BuildOpts().SessionID),
llb.FollowPaths([]string{dockerignoreFilename}),
- llb.SharedKeyHint("context:"+name+"-"+dockerignoreFilename),
- llb.WithCustomName("[context "+name+"] load "+dockerignoreFilename),
+ llb.SharedKeyHint("context:"+pname+"-"+dockerignoreFilename),
+ llb.WithCustomName("[context "+pname+"] load "+dockerignoreFilename),
llb.Differ(llb.DiffNone, false),
)
def, err := st.Marshal(ctx)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, err
}
res, err := c.Solve(ctx, client.SolveRequest{
Evaluate: true,
Definition: def.ToPB(),
})
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, err
}
ref, err := res.SingleRef()
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, err
}
dt, _ := ref.ReadFile(ctx, client.ReadRequest{
Filename: dockerignoreFilename,
@@ -904,58 +1055,46 @@ func contextByName(ctx context.Context, c client.Client, name string, platform *
if len(dt) != 0 {
excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt))
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, err
}
}
st = llb.Local(vv[1],
- llb.WithCustomName("[context "+name+"] load from client"),
+ llb.WithCustomName("[context "+pname+"] load from client"),
llb.SessionID(c.BuildOpts().SessionID),
- llb.SharedKeyHint("context:"+name),
+ llb.SharedKeyHint("context:"+pname),
llb.ExcludePatterns(excludes),
)
- return &st, nil, nil, nil
+ return &st, nil, nil
case "input":
inputs, err := c.Inputs(ctx)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, err
}
st, ok := inputs[vv[1]]
if !ok {
- return nil, nil, nil, errors.Errorf("invalid input %s for %s", vv[1], name)
+ return nil, nil, errors.Errorf("invalid input %s for %s", vv[1], pname)
}
- md, ok := opts["input-metadata:"+vv[1]]
+ md, ok := opts[inputMetadataPrefix+vv[1]]
if ok {
m := make(map[string][]byte)
if err := json.Unmarshal([]byte(md), &m); err != nil {
- return nil, nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md)
- }
- var bi *binfotypes.BuildInfo
- if dtbi, ok := m[exptypes.ExporterBuildInfo]; ok {
- var depbi binfotypes.BuildInfo
- if err := json.Unmarshal(dtbi, &depbi); err != nil {
- return nil, nil, nil, errors.Wrapf(err, "failed to parse buildinfo for %s", name)
- }
- bi = &binfotypes.BuildInfo{
- Deps: map[string]binfotypes.BuildInfo{
- strings.SplitN(vv[1], "::", 2)[0]: depbi,
- },
- }
+ return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md)
}
var img *dockerfile2llb.Image
if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok {
st, err = st.WithImageConfig(dtic)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, err
}
if err := json.Unmarshal(dtic, &img); err != nil {
- return nil, nil, nil, errors.Wrapf(err, "failed to parse image config for %s", name)
+ return nil, nil, errors.Wrapf(err, "failed to parse image config for %s", pname)
}
}
- return &st, img, bi, nil
+ return &st, img, nil
}
- return &st, nil, nil, nil
+ return &st, nil, nil
default:
- return nil, nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], name)
+ return nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], pname)
}
}
@@ -985,3 +1124,15 @@ func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error {
}
return errdefs.WithSource(err, s)
}
+
+func parseSourceDateEpoch(v string) (*time.Time, error) {
+ if v == "" {
+ return nil, nil
+ }
+ sde, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ return nil, errors.Wrapf(err, "invalid SOURCE_DATE_EPOCH: %s", v)
+ }
+ tm := time.Unix(sde, 0).UTC()
+ return &tm, nil
+}
diff --git a/frontend/dockerfile/builder/subrequests.go b/frontend/dockerfile/builder/subrequests.go
index 6d30b7b8cc1c..844953023822 100644
--- a/frontend/dockerfile/builder/subrequests.go
+++ b/frontend/dockerfile/builder/subrequests.go
@@ -1,16 +1,19 @@
package builder
import (
+ "bytes"
"context"
"encoding/json"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/frontend/subrequests"
+ "github.com/moby/buildkit/frontend/subrequests/outline"
+ "github.com/moby/buildkit/frontend/subrequests/targets"
"github.com/moby/buildkit/solver/errdefs"
)
func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Result, bool, error) {
- req, ok := opts["requestid"]
+ req, ok := opts[keyRequestID]
if !ok {
return nil, false, nil
}
@@ -18,6 +21,8 @@ func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Resul
case subrequests.RequestSubrequestsDescribe:
res, err := describe()
return res, true, err
+ case outline.RequestSubrequestsOutline, targets.RequestTargets: // handled later
+ return nil, false, nil
default:
return nil, true, errdefs.NewUnsupportedSubrequestError(req)
}
@@ -25,15 +30,25 @@ func checkSubRequest(ctx context.Context, opts map[string]string) (*client.Resul
func describe() (*client.Result, error) {
all := []subrequests.Request{
+ outline.SubrequestsOutlineDefinition,
+ targets.SubrequestsTargetsDefinition,
subrequests.SubrequestsDescribeDefinition,
}
- dt, err := json.MarshalIndent(all, " ", "")
+ dt, err := json.MarshalIndent(all, "", " ")
if err != nil {
return nil, err
}
+
+ b := bytes.NewBuffer(nil)
+ if err := subrequests.PrintDescribe(dt, b); err != nil {
+ return nil, err
+ }
+
res := client.NewResult()
res.Metadata = map[string][]byte{
"result.json": dt,
+ "result.txt": b.Bytes(),
+ "version": []byte(subrequests.SubrequestsDescribeDefinition.Version),
}
return res, nil
}
diff --git a/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile b/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile
index 7f181948816b..23b1fd288fa0 100644
--- a/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile
+++ b/frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile
@@ -1,9 +1,9 @@
# syntax=docker/dockerfile-upstream:master
# xx is a helper for cross-compilation
-FROM --platform=$BUILDPLATFORM tonistiigi/xx:golang@sha256:810dc54d5144f133a218e88e319184bf8b9ce01d37d46ddb37573e90decd9eef AS xx
+FROM --platform=$BUILDPLATFORM tonistiigi/xx:master@sha256:d4254d9739ce2de9fb88e09bdc716aa0c65f0446a2a2143399f991d71136a3d4 AS xx
-FROM --platform=$BUILDPLATFORM golang:1.17-alpine AS base
+FROM --platform=$BUILDPLATFORM golang:1.19-alpine AS base
RUN apk add git bash
COPY --from=xx / /
WORKDIR /src
diff --git a/frontend/dockerfile/cmd/dockerfile-frontend/hack/release b/frontend/dockerfile/cmd/dockerfile-frontend/hack/release
index b572bda1756b..7a7909c5fca3 100755
--- a/frontend/dockerfile/cmd/dockerfile-frontend/hack/release
+++ b/frontend/dockerfile/cmd/dockerfile-frontend/hack/release
@@ -1,10 +1,11 @@
#!/usr/bin/env bash
. $(dirname $0)/../../../../../hack/util
-set -e
+set -eu
-: ${PLATFORMS=}
-: ${DAILY_TARGETS=}
+: "${RELEASE=false}"
+: "${PLATFORMS=}"
+: "${DAILY_TARGETS=}"
usage() {
echo "$0 (master|tag|daily) (tag|channel) [push]"
@@ -23,7 +24,7 @@ parseTag() {
fi
local suffix=$(echo $1 | awk -F- '{print $NF}')
local tagf=./frontend/dockerfile/release/$suffix/tags
- if [ "$sufffix" == "$1" ] || [ ! -f $tagf ]; then
+ if [ "$suffix" == "$1" ] || [ ! -f $tagf ]; then
suffix="mainline"
fi
@@ -70,6 +71,11 @@ if [ "$PUSH" = "push" ]; then
pushFlag="push=true"
fi
+nocacheFilterFlag=""
+if [[ "$RELEASE" = "true" ]] && [[ "$GITHUB_ACTIONS" = "true" ]]; then
+ nocacheFilterFlag="--no-cache-filter=base"
+fi
+
case $TYP in
"master")
tagf=./frontend/dockerfile/release/$TAG/tags
@@ -84,10 +90,10 @@ case $TYP in
pushTag=${pushTag}-$TAG
fi
- buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags \
+ buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags $nocacheFilterFlag $(buildAttestFlags) \
--build-arg "CHANNEL=$TAG" \
--build-arg "BUILDTAGS=$buildTags" \
- --output "type=image,name=$REPO:$pushTag,buildinfo-attrs=true,$pushFlag" \
+ --output "type=image,name=$REPO:$pushTag,$pushFlag" \
--file "./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile" \
$currentcontext
;;
@@ -101,10 +107,10 @@ case $TYP in
fi
buildTags=$(cat $tagf)
- buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags \
+ buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags $nocacheFilterFlag $(buildAttestFlags) \
--build-arg "CHANNEL=$TAG" \
--build-arg "BUILDTAGS=$buildTags" \
- --output "type=image,\"name=$publishedNames\",buildinfo-attrs=true,$pushFlag" \
+ --output "type=image,\"name=$publishedNames\",$pushFlag" \
--file "./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile" \
$currentcontext
;;
@@ -127,7 +133,7 @@ case $TYP in
tmp=$(mktemp -d -t buildid.XXXXXXXXXX)
dt=$(date +%Y%m%d)
- buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags \
+ buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags $nocacheFilterFlag \
--target "buildid" \
--build-arg "CHANNEL=$TAG" \
--build-arg "BUILDTAGS=$buildTags" \
@@ -141,10 +147,10 @@ case $TYP in
buildid=$(cat $tmp/buildid)
echo "buildid: $buildid"
- buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags \
+ buildxCmd build $platformFlag $cacheFromFlags $cacheToFlags $nocacheFilterFlag $(buildAttestFlags) \
--build-arg "CHANNEL=$TAG" \
--build-arg "BUILDTAGS=$buildTags" \
- --output "type=image,name=$REPO:$dt-$TAG,buildinfo-attrs=true,$pushFlag" \
+ --output "type=image,name=$REPO:$dt-$TAG,$pushFlag" \
--file "./frontend/dockerfile/cmd/dockerfile-frontend/Dockerfile" \
$currentcontext
rm $tmp/buildid
diff --git a/frontend/dockerfile/dockerfile2llb/convert.go b/frontend/dockerfile/dockerfile2llb/convert.go
index 7ac6b9bdd774..6476267e2d32 100644
--- a/frontend/dockerfile/dockerfile2llb/convert.go
+++ b/frontend/dockerfile/dockerfile2llb/convert.go
@@ -13,22 +13,28 @@ import (
"sort"
"strconv"
"strings"
+ "time"
"github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference"
"github.com/docker/go-connections/nat"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/imagemetaresolver"
+ "github.com/moby/buildkit/exporter/containerimage/image"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/moby/buildkit/frontend/dockerfile/shell"
+ "github.com/moby/buildkit/frontend/subrequests/outline"
+ "github.com/moby/buildkit/frontend/subrequests/targets"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
binfotypes "github.com/moby/buildkit/util/buildinfo/types"
+ "github.com/moby/buildkit/util/gitutil"
"github.com/moby/buildkit/util/suggest"
"github.com/moby/buildkit/util/system"
"github.com/moby/sys/signal"
+ digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
@@ -39,9 +45,15 @@ const (
defaultContextLocalName = "context"
historyComment = "buildkit.dockerfile.v0"
- DefaultCopyImage = "docker/dockerfile-copy:v0.1.9@sha256:e8f159d3f00786604b93c675ee2783f8dc194bb565e61ca5788f6a6e9d304061"
+ sbomScanContext = "BUILDKIT_SBOM_SCAN_CONTEXT"
+ sbomScanStage = "BUILDKIT_SBOM_SCAN_STAGE"
)
+var nonEnvArgs = map[string]struct{}{
+ sbomScanContext: {},
+ sbomScanStage: {},
+}
+
type ConvertOpt struct {
Target string
MetaResolver llb.ImageMetaResolver
@@ -54,57 +66,127 @@ type ConvertOpt struct {
// Empty slice means ignore cache for all stages. Nil doesn't disable cache.
IgnoreCache []string
// CacheIDNamespace scopes the IDs for different cache mounts
- CacheIDNamespace string
- ImageResolveMode llb.ResolveMode
- TargetPlatform *ocispecs.Platform
- BuildPlatforms []ocispecs.Platform
- PrefixPlatform bool
- ExtraHosts []llb.HostIP
- ShmSize int64
- Ulimit []pb.Ulimit
- CgroupParent string
- ForceNetMode pb.NetMode
- OverrideCopyImage string
- LLBCaps *apicaps.CapSet
- ContextLocalName string
- SourceMap *llb.SourceMap
- Hostname string
- Warn func(short, url string, detail [][]byte, location *parser.Range)
- ContextByName func(context.Context, string) (*llb.State, *Image, *binfotypes.BuildInfo, error)
-}
-
-func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *binfotypes.BuildInfo, error) {
- buildInfo := &binfotypes.BuildInfo{}
+ CacheIDNamespace string
+ ImageResolveMode llb.ResolveMode
+ TargetPlatform *ocispecs.Platform
+ BuildPlatforms []ocispecs.Platform
+ PrefixPlatform bool
+ ExtraHosts []llb.HostIP
+ ShmSize int64
+ Ulimit []pb.Ulimit
+ CgroupParent string
+ ForceNetMode pb.NetMode
+ LLBCaps *apicaps.CapSet
+ ContextLocalName string
+ SourceMap *llb.SourceMap
+ Hostname string
+ SourceDateEpoch *time.Time
+ Warn func(short, url string, detail [][]byte, location *parser.Range)
+ ContextByName func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error)
+}
+
+type SBOMTargets struct {
+ Core llb.State
+ Extras map[string]llb.State
+
+ IgnoreCache bool
+}
+
+func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *SBOMTargets, error) {
+ ds, err := toDispatchState(ctx, dt, opt)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ sbom := SBOMTargets{
+ Core: ds.state,
+ Extras: map[string]llb.State{},
+ }
+ if ds.scanContext {
+ sbom.Extras["context"] = ds.opt.buildContext
+ }
+ if ds.ignoreCache {
+ sbom.IgnoreCache = true
+ }
+ for _, dsi := range findReachable(ds) {
+ if ds != dsi && dsi.scanStage {
+ sbom.Extras[dsi.stageName] = dsi.state
+ if dsi.ignoreCache {
+ sbom.IgnoreCache = true
+ }
+ }
+ }
+
+ return &ds.state, &ds.image, &sbom, nil
+}
+
+func Dockefile2Outline(ctx context.Context, dt []byte, opt ConvertOpt) (*outline.Outline, error) {
+ ds, err := toDispatchState(ctx, dt, opt)
+ if err != nil {
+ return nil, err
+ }
+ o := ds.Outline(dt)
+ return &o, nil
+}
+
+func ListTargets(ctx context.Context, dt []byte) (*targets.List, error) {
+ dockerfile, err := parser.Parse(bytes.NewReader(dt))
+ if err != nil {
+ return nil, err
+ }
+ stages, _, err := instructions.Parse(dockerfile.AST)
+ if err != nil {
+ return nil, err
+ }
+
+ l := &targets.List{
+ Sources: [][]byte{dt},
+ }
+
+ for i, s := range stages {
+ t := targets.Target{
+ Name: s.Name,
+ Description: s.Comment,
+ Default: i == len(stages)-1,
+ Base: s.BaseName,
+ Platform: s.Platform,
+ Location: toSourceLocation(s.Location),
+ }
+ l.Targets = append(l.Targets, t)
+ }
+ return l, nil
+}
+
+func toDispatchState(ctx context.Context, dt []byte, opt ConvertOpt) (*dispatchState, error) {
contextByName := opt.ContextByName
- opt.ContextByName = func(ctx context.Context, name string) (*llb.State, *Image, *binfotypes.BuildInfo, error) {
+ opt.ContextByName = func(ctx context.Context, name, resolveMode string, p *ocispecs.Platform) (*llb.State, *Image, error) {
if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") {
if contextByName != nil {
- st, img, bi, err := contextByName(ctx, name)
- if err != nil {
- return nil, nil, nil, err
+ if p == nil {
+ p = opt.TargetPlatform
}
- if bi != nil && bi.Deps != nil {
- for k := range bi.Deps {
- if buildInfo.Deps == nil {
- buildInfo.Deps = make(map[string]binfotypes.BuildInfo)
- }
- buildInfo.Deps[k] = bi.Deps[k]
- }
+ st, img, err := contextByName(ctx, name, resolveMode, p)
+ if err != nil {
+ return nil, nil, err
}
- return st, img, bi, nil
+ return st, img, nil
}
}
- return nil, nil, nil, nil
+ return nil, nil, nil
}
if len(dt) == 0 {
- return nil, nil, nil, errors.Errorf("the Dockerfile cannot be empty")
+ return nil, errors.Errorf("the Dockerfile cannot be empty")
}
if opt.ContextLocalName == "" {
opt.ContextLocalName = defaultContextLocalName
}
+ if opt.Warn == nil {
+ opt.Warn = func(string, string, [][]byte, *parser.Range) {}
+ }
+
platformOpt := buildPlatformOpt(&opt)
optMetaArgs := getPlatformArgs(platformOpt)
@@ -114,7 +196,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
dockerfile, err := parser.Parse(bytes.NewReader(dt))
if err != nil {
- return nil, nil, nil, err
+ return nil, err
}
for _, w := range dockerfile.Warnings {
@@ -125,17 +207,27 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
stages, metaArgs, err := instructions.Parse(dockerfile.AST)
if err != nil {
- return nil, nil, nil, err
+ return nil, err
}
shlex := shell.NewLex(dockerfile.EscapeToken)
+ outline := newOutlineCapture()
for _, cmd := range metaArgs {
for _, metaArg := range cmd.Args {
+ info := argInfo{definition: metaArg, location: cmd.Location()}
+ if v, ok := opt.BuildArgs[metaArg.Key]; !ok {
+ if metaArg.Value != nil {
+ *metaArg.Value, info.deps, _ = shlex.ProcessWordWithMatches(*metaArg.Value, metaArgsToMap(optMetaArgs))
+ }
+ } else {
+ metaArg.Value = &v
+ }
+ optMetaArgs = append(optMetaArgs, metaArg)
if metaArg.Value != nil {
- *metaArg.Value, _ = shlex.ProcessWordWithMap(*metaArg.Value, metaArgsToMap(optMetaArgs))
+ info.value = *metaArg.Value
}
- optMetaArgs = append(optMetaArgs, setKVValue(metaArg, opt.BuildArgs))
+ outline.allArgs[metaArg.Key] = info
}
}
@@ -148,60 +240,73 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
// set base state for every image
for i, st := range stages {
- name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs))
+ name, used, err := shlex.ProcessWordWithMatches(st.BaseName, metaArgsToMap(optMetaArgs))
if err != nil {
- return nil, nil, nil, parser.WithLocation(err, st.Location)
+ return nil, parser.WithLocation(err, st.Location)
}
if name == "" {
- return nil, nil, nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location)
+ return nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location)
}
st.BaseName = name
ds := &dispatchState{
+ stage: st,
deps: make(map[*dispatchState]struct{}),
ctxPaths: make(map[string]struct{}),
stageName: st.Name,
prefixPlatform: opt.PrefixPlatform,
+ outline: outline.clone(),
+ epoch: opt.SourceDateEpoch,
+ }
+
+ if v := st.Platform; v != "" {
+ v, u, err := shlex.ProcessWordWithMatches(v, metaArgsToMap(optMetaArgs))
+ if err != nil {
+ return nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location)
+ }
+
+ p, err := platforms.Parse(v)
+ if err != nil {
+ return nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location)
+ }
+ for k := range u {
+ used[k] = struct{}{}
+ }
+ ds.platform = &p
}
if st.Name != "" {
- s, img, bi, err := opt.ContextByName(ctx, st.Name)
+ s, img, err := opt.ContextByName(ctx, st.Name, opt.ImageResolveMode.String(), ds.platform)
if err != nil {
- return nil, nil, nil, err
+ return nil, err
}
if s != nil {
ds.noinit = true
ds.state = *s
if img != nil {
- ds.image = *img
- }
- if bi != nil {
- ds.buildInfo = *bi
+ ds.image = clampTimes(*img, opt.SourceDateEpoch)
+ if img.Architecture != "" && img.OS != "" {
+ ds.platform = &ocispecs.Platform{
+ OS: img.OS,
+ Architecture: img.Architecture,
+ Variant: img.Variant,
+ }
+ }
}
allDispatchStates.addState(ds)
continue
}
}
- ds.stage = st
-
if st.Name == "" {
ds.stageName = fmt.Sprintf("stage-%d", i)
}
- if v := st.Platform; v != "" {
- v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs))
- if err != nil {
- return nil, nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location)
- }
+ allDispatchStates.addState(ds)
- p, err := platforms.Parse(v)
- if err != nil {
- return nil, nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location)
- }
- ds.platform = &p
+ for k := range used {
+ ds.outline.usedArgs[k] = struct{}{}
}
- allDispatchStates.addState(ds)
total := 0
if ds.stage.BaseName != emptyImageName && ds.base == nil {
@@ -212,9 +317,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand:
total++
case *instructions.WorkdirCommand:
- if useFileOp(opt.BuildArgs, opt.LLBCaps) {
- total++
- }
+ total++
}
}
ds.cmdTotal = total
@@ -239,7 +342,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
var ok bool
target, ok = allDispatchStates.findStateByName(opt.Target)
if !ok {
- return nil, nil, nil, errors.Errorf("target stage %s could not be found", opt.Target)
+ return nil, errors.Errorf("target stage %s could not be found", opt.Target)
}
}
@@ -249,7 +352,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
for i, cmd := range d.stage.Commands {
newCmd, err := toCommand(cmd, allDispatchStates)
if err != nil {
- return nil, nil, nil, err
+ return nil, err
}
d.commands[i] = newCmd
for _, src := range newCmd.sources {
@@ -264,7 +367,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
}
if has, state := hasCircularDependency(allDispatchStates.states); has {
- return nil, nil, nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName)
+ return nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName)
}
if len(allDispatchStates.states) == 1 {
@@ -307,7 +410,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
d.stage.BaseName = reference.TagNameOnly(ref).String()
var isScratch bool
- st, img, bi, err := opt.ContextByName(ctx, d.stage.BaseName)
+ st, img, err := opt.ContextByName(ctx, d.stage.BaseName, opt.ImageResolveMode.String(), platform)
if err != nil {
return err
}
@@ -317,10 +420,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
} else {
d.image = emptyImage(platformOpt.targetPlatform)
}
- if bi != nil {
- d.buildInfo = *bi
- }
- d.state = *st
+ d.state = st.Platform(*platform)
d.platform = platform
return nil
}
@@ -331,9 +431,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
}
prefix += "internal]"
dgst, dt, err := metaResolver.ResolveImageConfig(ctx, d.stage.BaseName, llb.ResolveImageConfigOpt{
- Platform: platform,
- ResolveMode: opt.ImageResolveMode.String(),
- LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName),
+ Platform: platform,
+ ResolveMode: opt.ImageResolveMode.String(),
+ LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName),
+ ResolverType: llb.ResolverTypeRegistry,
})
if err != nil {
return suggest.WrapError(errors.Wrap(err, origName), origName, append(allStageNames, commonImageNames()...), true)
@@ -396,30 +497,17 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
}
if err := eg.Wait(); err != nil {
- return nil, nil, nil, err
+ return nil, err
}
buildContext := &mutableOutput{}
ctxPaths := map[string]struct{}{}
for _, d := range allDispatchStates.states {
- if !isReachable(target, d) {
+ if !isReachable(target, d) || d.noinit {
continue
}
- // collect build sources and dependencies
- if len(d.buildInfo.Sources) > 0 {
- buildInfo.Sources = append(buildInfo.Sources, d.buildInfo.Sources...)
- }
- if d.buildInfo.Deps != nil {
- for name, bi := range d.buildInfo.Deps {
- if buildInfo.Deps == nil {
- buildInfo.Deps = make(map[string]binfotypes.BuildInfo)
- }
- buildInfo.Deps[name] = bi
- }
- }
-
if d.base != nil {
d.state = d.base.state
d.platform = d.base.platform
@@ -428,11 +516,11 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
// make sure that PATH is always set
if _, ok := shell.BuildEnvs(d.image.Config.Env)["PATH"]; !ok {
- var os string
+ var pathOS string
if d.platform != nil {
- os = d.platform.OS
+ pathOS = d.platform.OS
}
- d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(os))
+ d.image.Config.Env = append(d.image.Config.Env, "PATH="+system.DefaultPathEnv(pathOS))
}
// initialize base metadata from image conf
@@ -445,12 +533,12 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
}
if d.image.Config.WorkingDir != "" {
if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil {
- return nil, nil, nil, parser.WithLocation(err, d.stage.Location)
+ return nil, parser.WithLocation(err, d.stage.Location)
}
}
if d.image.Config.User != "" {
if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil {
- return nil, nil, nil, parser.WithLocation(err, d.stage.Location)
+ return nil, parser.WithLocation(err, d.stage.Location)
}
}
d.state = d.state.Network(opt.ForceNetMode)
@@ -470,35 +558,37 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
shmSize: opt.ShmSize,
ulimit: opt.Ulimit,
cgroupParent: opt.CgroupParent,
- copyImage: opt.OverrideCopyImage,
llbCaps: opt.LLBCaps,
sourceMap: opt.SourceMap,
}
- if opt.copyImage == "" {
- opt.copyImage = DefaultCopyImage
- }
if err = dispatchOnBuildTriggers(d, d.image.Config.OnBuild, opt); err != nil {
- return nil, nil, nil, parser.WithLocation(err, d.stage.Location)
+ return nil, parser.WithLocation(err, d.stage.Location)
}
d.image.Config.OnBuild = nil
for _, cmd := range d.commands {
if err := dispatch(d, cmd, opt); err != nil {
- return nil, nil, nil, parser.WithLocation(err, cmd.Location())
+ return nil, parser.WithLocation(err, cmd.Location())
}
}
+ d.opt = opt
for p := range d.ctxPaths {
ctxPaths[p] = struct{}{}
}
- }
- // sort build sources
- if len(buildInfo.Sources) > 0 {
- sort.Slice(buildInfo.Sources, func(i, j int) bool {
- return buildInfo.Sources[i].Ref < buildInfo.Sources[j].Ref
- })
+ locals := []instructions.KeyValuePairOptional{}
+ locals = append(locals, d.opt.metaArgs...)
+ locals = append(locals, d.buildArgs...)
+ for _, a := range locals {
+ switch a.Key {
+ case sbomScanStage:
+ d.scanStage = isEnabledForStage(d.stageName, a.ValueString())
+ case sbomScanContext:
+ d.scanContext = isEnabledForStage(d.stageName, a.ValueString())
+ }
+ }
}
if len(opt.Labels) != 0 && target.image.Config.Labels == nil {
@@ -530,7 +620,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
if opt.LLBCaps != nil {
defaults = append(defaults, llb.WithCaps(*opt.LLBCaps))
}
- st := target.state.SetMarshalDefaults(defaults...)
+ target.state = target.state.SetMarshalDefaults(defaults...)
if !platformOpt.implicitTarget {
target.image.OS = platformOpt.targetPlatform.OS
@@ -538,7 +628,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
target.image.Variant = platformOpt.targetPlatform.Variant
}
- return &st, &target.image, buildInfo, nil
+ return target, nil
}
func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string {
@@ -598,7 +688,6 @@ type dispatchOpt struct {
shmSize int64
ulimit []pb.Ulimit
cgroupParent string
- copyImage string
llbCaps *apicaps.CapSet
sourceMap *llb.SourceMap
}
@@ -643,17 +732,25 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
case *instructions.WorkdirCommand:
err = dispatchWorkdir(d, c, true, &opt)
case *instructions.AddCommand:
- err = dispatchCopy(d, copyConfig{
- params: c.SourcesAndDest,
- source: opt.buildContext,
- isAddCommand: true,
- cmdToPrint: c,
- chown: c.Chown,
- chmod: c.Chmod,
- link: c.Link,
- location: c.Location(),
- opt: opt,
- })
+ var checksum digest.Digest
+ if c.Checksum != "" {
+ checksum, err = digest.Parse(c.Checksum)
+ }
+ if err == nil {
+ err = dispatchCopy(d, copyConfig{
+ params: c.SourcesAndDest,
+ source: opt.buildContext,
+ isAddCommand: true,
+ cmdToPrint: c,
+ chown: c.Chown,
+ chmod: c.Chmod,
+ link: c.Link,
+ keepGitDir: c.KeepGitDir,
+ checksum: checksum,
+ location: c.Location(),
+ opt: opt,
+ })
+ }
if err == nil {
for _, src := range c.SourcePaths {
if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") {
@@ -710,6 +807,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
}
type dispatchState struct {
+ opt dispatchOpt
state llb.State
image Image
platform *ocispecs.Platform
@@ -728,6 +826,10 @@ type dispatchState struct {
cmdTotal int
prefixPlatform bool
buildInfo binfotypes.BuildInfo
+ outline outlineCapture
+ epoch *time.Time
+ scanStage bool
+ scanContext bool
}
type dispatchStates struct {
@@ -744,6 +846,7 @@ func (dss *dispatchStates) addState(ds *dispatchState) {
if d, ok := dss.statesByName[ds.stage.BaseName]; ok {
ds.base = d
+ ds.outline = d.outline.clone()
}
if ds.stage.Name != "" {
dss.statesByName[strings.ToLower(ds.stage.Name)] = ds
@@ -803,7 +906,7 @@ func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error {
d.state = d.state.AddEnv(e.Key, e.Value)
d.image.Config.Env = addEnv(d.image.Config.Env, e.Key, e.Value)
}
- return commitToHistory(&d.image, commitMessage.String(), false, nil)
+ return commitToHistory(&d.image, commitMessage.String(), false, nil, d.epoch)
}
func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error {
@@ -814,7 +917,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
var args []string = c.CmdLine
if len(c.Files) > 0 {
if len(args) != 1 || !c.PrependShell {
- return fmt.Errorf("parsing produced an invalid run command: %v", args)
+ return errors.Errorf("parsing produced an invalid run command: %v", args)
}
if heredoc := parser.MustParseHeredoc(args[0]); heredoc != nil {
@@ -933,7 +1036,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
}
d.state = d.state.Run(opt...).Root()
- return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state)
+ return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state, d.epoch)
}
func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error {
@@ -945,7 +1048,7 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo
d.image.Config.WorkingDir = wd
if commit {
withLayer := false
- if wd != "/" && opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) {
+ if wd != "/" {
mkdirOpt := []llb.MkdirOption{llb.WithParents(true)}
if user := d.image.Config.User; user != "" {
mkdirOpt = append(mkdirOpt, llb.WithUser(user))
@@ -964,12 +1067,12 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo
)
withLayer = true
}
- return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil)
+ return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil, d.epoch)
}
return nil
}
-func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error {
+func dispatchCopy(d *dispatchState, cfg copyConfig) error {
pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath)
if err != nil {
return err
@@ -994,6 +1097,21 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error {
}
}
+ if cfg.checksum != "" {
+ if !cfg.isAddCommand {
+ return errors.New("checksum can't be specified for COPY")
+ }
+ if !addChecksumEnabled {
+ return errors.New("instruction 'ADD --checksum=' requires the labs channel")
+ }
+ if len(cfg.params.SourcePaths) != 1 {
+ return errors.New("checksum can't be specified for multiple sources")
+ }
+ if !isHTTPSource(cfg.params.SourcePaths[0]) {
+ return errors.New("checksum can't be specified for non-HTTP sources")
+ }
+ }
+
commitMessage := bytes.NewBufferString("")
if cfg.isAddCommand {
commitMessage.WriteString("ADD")
@@ -1005,7 +1123,34 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error {
for _, src := range cfg.params.SourcePaths {
commitMessage.WriteString(" " + src)
- if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
+ gitRef, gitRefErr := gitutil.ParseGitRef(src)
+ if gitRefErr == nil && !gitRef.IndistinguishableFromLocal {
+ if !cfg.isAddCommand {
+ return errors.New("source can't be a git ref for COPY")
+ }
+ if !addGitEnabled {
+ return errors.New("instruction ADD requires the labs channel")
+ }
+ // TODO: print a warning (not an error) if gitRef.UnencryptedTCP is true
+ commit := gitRef.Commit
+ if gitRef.SubDir != "" {
+ commit += ":" + gitRef.SubDir
+ }
+ var gitOptions []llb.GitOption
+ if cfg.keepGitDir {
+ gitOptions = append(gitOptions, llb.KeepGitDir())
+ }
+ st := llb.Git(gitRef.Remote, commit, gitOptions...)
+ opts := append([]llb.CopyOption{&llb.CopyInfo{
+ Mode: mode,
+ CreateDestPath: true,
+ }}, copyOpt...)
+ if a == nil {
+ a = llb.Copy(st, "/", dest, opts...)
+ } else {
+ a = a.Copy(st, "/", dest, opts...)
+ }
+ } else if isHTTPSource(src) {
if !cfg.isAddCommand {
return errors.New("source can't be a URL for COPY")
}
@@ -1023,7 +1168,7 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error {
}
}
- st := llb.HTTP(src, llb.Filename(f), dfCmd(cfg.params))
+ st := llb.HTTP(src, llb.Filename(f), llb.Checksum(cfg.checksum), dfCmd(cfg.params))
opts := append([]llb.CopyOption{&llb.CopyInfo{
Mode: mode,
@@ -1097,7 +1242,8 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error {
fileOpt = append(fileOpt, llb.IgnoreCache)
}
- if cfg.opt.llbCaps.Supports(pb.CapMergeOp) == nil && cfg.link && cfg.chmod == "" {
+ // cfg.opt.llbCaps can be nil in unit tests
+ if cfg.opt.llbCaps != nil && cfg.opt.llbCaps.Supports(pb.CapMergeOp) == nil && cfg.link && cfg.chmod == "" {
pgID := identity.NewID()
d.cmdIndex-- // prefixCommand increases it
pgName := prefixCommand(d, name, d.prefixPlatform, &platform, env)
@@ -1116,7 +1262,7 @@ func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error {
d.state = d.state.File(a, fileOpt...)
}
- return commitToHistory(&d.image, commitMessage.String(), true, &d.state)
+ return commitToHistory(&d.image, commitMessage.String(), true, &d.state, d.epoch)
}
type copyConfig struct {
@@ -1127,136 +1273,15 @@ type copyConfig struct {
chown string
chmod string
link bool
+ keepGitDir bool
+ checksum digest.Digest
location []parser.Range
opt dispatchOpt
}
-func dispatchCopy(d *dispatchState, cfg copyConfig) error {
- if useFileOp(cfg.opt.buildArgValues, cfg.opt.llbCaps) {
- return dispatchCopyFileOp(d, cfg)
- }
-
- if len(cfg.params.SourceContents) > 0 {
- return errors.New("inline content copy is not supported")
- }
-
- if cfg.chmod != "" {
- if cfg.opt.llbCaps != nil && cfg.opt.llbCaps.Supports(pb.CapFileBase) != nil {
- return errors.Wrap(cfg.opt.llbCaps.Supports(pb.CapFileBase), "chmod is not supported")
- }
- return errors.New("chmod is not supported")
- }
-
- img := llb.Image(cfg.opt.copyImage, llb.MarkImageInternal, llb.Platform(cfg.opt.buildPlatforms[0]), WithInternalName("helper image for file operations"))
- pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath)
- if err != nil {
- return err
- }
- dest := path.Join(".", pp)
- if cfg.params.DestPath == "." || cfg.params.DestPath == "" || cfg.params.DestPath[len(cfg.params.DestPath)-1] == filepath.Separator {
- dest += string(filepath.Separator)
- }
- args := []string{"copy"}
- unpack := cfg.isAddCommand
-
- mounts := make([]llb.RunOption, 0, len(cfg.params.SourcePaths))
- if cfg.chown != "" {
- args = append(args, fmt.Sprintf("--chown=%s", cfg.chown))
- _, _, err := parseUser(cfg.chown)
- if err != nil {
- mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly))
- mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly))
- }
- }
-
- commitMessage := bytes.NewBufferString("")
- if cfg.isAddCommand {
- commitMessage.WriteString("ADD")
- } else {
- commitMessage.WriteString("COPY")
- }
-
- for i, src := range cfg.params.SourcePaths {
- commitMessage.WriteString(" " + src)
- if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
- if !cfg.isAddCommand {
- return errors.New("source can't be a URL for COPY")
- }
-
- // Resources from remote URLs are not decompressed.
- // https://docs.docker.com/engine/reference/builder/#add
- //
- // Note: mixing up remote archives and local archives in a single ADD instruction
- // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717
- unpack = false
- u, err := url.Parse(src)
- f := "__unnamed__"
- if err == nil {
- if base := path.Base(u.Path); base != "." && base != "/" {
- f = base
- }
- }
- target := path.Join(fmt.Sprintf("/src-%d", i), f)
- args = append(args, target)
- mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(cfg.params)), llb.Readonly))
- } else {
- d, f := splitWildcards(src)
- targetCmd := fmt.Sprintf("/src-%d", i)
- targetMount := targetCmd
- if f == "" {
- f = path.Base(src)
- targetMount = path.Join(targetMount, f)
- }
- targetCmd = path.Join(targetCmd, f)
- args = append(args, targetCmd)
- mounts = append(mounts, llb.AddMount(targetMount, cfg.source, llb.SourcePath(d), llb.Readonly))
- }
- }
-
- commitMessage.WriteString(" " + cfg.params.DestPath)
-
- args = append(args, dest)
- if unpack {
- args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...)
- }
-
- platform := cfg.opt.targetPlatform
- if d.platform != nil {
- platform = *d.platform
- }
-
- env, err := d.state.Env(context.TODO())
- if err != nil {
- return err
- }
-
- runOpt := []llb.RunOption{
- llb.Args(args),
- llb.Dir("/dest"),
- llb.ReadonlyRootFS(),
- dfCmd(cfg.cmdToPrint),
- llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(cfg.opt.shlex, cfg.cmdToPrint.String(), env)), d.prefixPlatform, &platform, env)),
- location(cfg.opt.sourceMap, cfg.location),
- }
- if d.ignoreCache {
- runOpt = append(runOpt, llb.IgnoreCache)
- }
-
- if cfg.opt.llbCaps != nil {
- if err := cfg.opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil {
- runOpt = append(runOpt, llb.Network(llb.NetModeNone))
- }
- }
-
- run := img.Run(append(runOpt, mounts...)...)
- d.state = run.AddMount("/dest", d.state).Platform(platform)
-
- return commitToHistory(&d.image, commitMessage.String(), true, &d.state)
-}
-
func dispatchMaintainer(d *dispatchState, c *instructions.MaintainerCommand) error {
d.image.Author = c.Maintainer
- return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("MAINTAINER %v", c.Maintainer), false, nil, d.epoch)
}
func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error {
@@ -1268,7 +1293,7 @@ func dispatchLabel(d *dispatchState, c *instructions.LabelCommand) error {
d.image.Config.Labels[v.Key] = v.Value
commitMessage.WriteString(" " + v.String())
}
- return commitToHistory(&d.image, commitMessage.String(), false, nil)
+ return commitToHistory(&d.image, commitMessage.String(), false, nil, d.epoch)
}
func dispatchOnbuild(d *dispatchState, c *instructions.OnbuildCommand) error {
@@ -1284,7 +1309,7 @@ func dispatchCmd(d *dispatchState, c *instructions.CmdCommand) error {
d.image.Config.Cmd = args
d.image.Config.ArgsEscaped = true
d.cmdSet = true
- return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("CMD %q", args), false, nil, d.epoch)
}
func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) error {
@@ -1296,18 +1321,18 @@ func dispatchEntrypoint(d *dispatchState, c *instructions.EntrypointCommand) err
if !d.cmdSet {
d.image.Config.Cmd = nil
}
- return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("ENTRYPOINT %q", args), false, nil, d.epoch)
}
func dispatchHealthcheck(d *dispatchState, c *instructions.HealthCheckCommand) error {
- d.image.Config.Healthcheck = &HealthConfig{
+ d.image.Config.Healthcheck = &image.HealthConfig{
Test: c.Health.Test,
Interval: c.Health.Interval,
Timeout: c.Health.Timeout,
StartPeriod: c.Health.StartPeriod,
Retries: c.Health.Retries,
}
- return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("HEALTHCHECK %q", d.image.Config.Healthcheck), false, nil, d.epoch)
}
func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shell.Lex) error {
@@ -1337,14 +1362,14 @@ func dispatchExpose(d *dispatchState, c *instructions.ExposeCommand, shlex *shel
d.image.Config.ExposedPorts[string(p)] = struct{}{}
}
- return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("EXPOSE %v", ps), false, nil, d.epoch)
}
func dispatchUser(d *dispatchState, c *instructions.UserCommand, commit bool) error {
d.state = d.state.User(c.User)
d.image.Config.User = c.User
if commit {
- return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("USER %v", c.User), false, nil, d.epoch)
}
return nil
}
@@ -1359,7 +1384,7 @@ func dispatchVolume(d *dispatchState, c *instructions.VolumeCommand) error {
}
d.image.Config.Volumes[v] = struct{}{}
}
- return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("VOLUME %v", c.Volumes), false, nil, d.epoch)
}
func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) error {
@@ -1367,12 +1392,12 @@ func dispatchStopSignal(d *dispatchState, c *instructions.StopSignalCommand) err
return err
}
d.image.Config.StopSignal = c.Signal
- return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("STOPSIGNAL %v", c.Signal), false, nil, d.epoch)
}
func dispatchShell(d *dispatchState, c *instructions.ShellCommand) error {
d.image.Config.Shell = c.Shell
- return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil)
+ return commitToHistory(&d.image, fmt.Sprintf("SHELL %v", c.Shell), false, nil, d.epoch)
}
func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instructions.KeyValuePairOptional, buildArgValues map[string]string) error {
@@ -1385,21 +1410,34 @@ func dispatchArg(d *dispatchState, c *instructions.ArgCommand, metaArgs []instru
commitStr += "=" + *arg.Value
}
commitStrs = append(commitStrs, commitStr)
+
+ skipArgInfo := false // skip the arg info if the arg is inherited from global scope
if buildArg.Value == nil {
for _, ma := range metaArgs {
if ma.Key == buildArg.Key {
buildArg.Value = ma.Value
+ skipArgInfo = true
}
}
}
+ ai := argInfo{definition: arg, location: c.Location()}
+
if buildArg.Value != nil {
- d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value)
+ if _, ok := nonEnvArgs[buildArg.Key]; !ok {
+ d.state = d.state.AddEnv(buildArg.Key, *buildArg.Value)
+ }
+ ai.value = *buildArg.Value
+ }
+
+ if !skipArgInfo {
+ d.outline.allArgs[arg.Key] = ai
}
+ d.outline.usedArgs[arg.Key] = struct{}{}
d.buildArgs = append(d.buildArgs, buildArg)
}
- return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil)
+ return commitToHistory(&d.image, "ARG "+strings.Join(commitStrs, " "), false, nil, d.epoch)
}
func pathRelativeToWorkingDir(s llb.State, p string) (string, error) {
@@ -1413,27 +1451,6 @@ func pathRelativeToWorkingDir(s llb.State, p string) (string, error) {
return path.Join(dir, p), nil
}
-func splitWildcards(name string) (string, string) {
- i := 0
- for ; i < len(name); i++ {
- ch := name[i]
- if ch == '\\' {
- i++
- } else if ch == '*' || ch == '?' || ch == '[' {
- break
- }
- }
- if i == len(name) {
- return name, ""
- }
-
- base := path.Base(name[:i])
- if name[:i] == "" || strings.HasSuffix(name[:i], string(filepath.Separator)) {
- base = ""
- }
- return path.Dir(name[:i]), base + name[i:]
-}
-
func addEnv(env []string, k, v string) []string {
gotOne := false
for i, envVar := range env {
@@ -1497,7 +1514,7 @@ func runCommandString(args []string, buildArgs []instructions.KeyValuePairOption
return strings.Join(append(tmpBuildEnv, args...), " ")
}
-func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) error {
+func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State, tm *time.Time) error {
if st != nil {
msg += " # buildkit"
}
@@ -1506,6 +1523,7 @@ func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) erro
CreatedBy: msg,
Comment: historyComment,
EmptyLayer: !withLayer,
+ Created: tm,
})
return nil
}
@@ -1525,6 +1543,20 @@ func isReachable(from, to *dispatchState) (ret bool) {
return false
}
+func findReachable(from *dispatchState) (ret []*dispatchState) {
+ if from == nil {
+ return nil
+ }
+ ret = append(ret, from)
+ if from.base != nil {
+ ret = append(ret, findReachable(from.base)...)
+ }
+ for d := range from.deps {
+ ret = append(ret, findReachable(d)...)
+ }
+ return ret
+}
+
func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) {
var visit func(state *dispatchState) bool
if states == nil {
@@ -1560,42 +1592,6 @@ func hasCircularDependency(states []*dispatchState) (bool, *dispatchState) {
return false, nil
}
-func parseUser(str string) (uid uint32, gid uint32, err error) {
- if str == "" {
- return 0, 0, nil
- }
- parts := strings.SplitN(str, ":", 2)
- for i, v := range parts {
- switch i {
- case 0:
- uid, err = parseUID(v)
- if err != nil {
- return 0, 0, err
- }
- if len(parts) == 1 {
- gid = uid
- }
- case 1:
- gid, err = parseUID(v)
- if err != nil {
- return 0, 0, err
- }
- }
- }
- return
-}
-
-func parseUID(str string) (uint32, error) {
- if str == "root" {
- return 0, nil
- }
- uid, err := strconv.ParseUint(str, 10, 32)
- if err != nil {
- return 0, err
- }
- return uint32(uid), nil
-}
-
func normalizeContextPaths(paths map[string]struct{}) []string {
pathSlice := make([]string, 0, len(paths))
for p := range paths {
@@ -1760,16 +1756,6 @@ func platformFromEnv(env []string) *ocispecs.Platform {
return &p
}
-func useFileOp(args map[string]string, caps *apicaps.CapSet) bool {
- enabled := true
- if v, ok := args["BUILDKIT_DISABLE_FILEOP"]; ok {
- if b, err := strconv.ParseBool(v); err == nil {
- enabled = !b
- }
- }
- return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil
-}
-
func location(sm *llb.SourceMap, locations []parser.Range) llb.ConstraintsOpt {
loc := make([]*pb.Range, 0, len(locations))
for _, l := range locations {
@@ -1807,3 +1793,36 @@ func commonImageNames() []string {
}
return out
}
+
+func clampTimes(img Image, tm *time.Time) Image {
+ if tm == nil {
+ return img
+ }
+ for i, h := range img.History {
+ if h.Created == nil || h.Created.After(*tm) {
+ img.History[i].Created = tm
+ }
+ }
+ if img.Created != nil && img.Created.After(*tm) {
+ img.Created = tm
+ }
+ return img
+}
+
+func isHTTPSource(src string) bool {
+ return strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://")
+}
+
+func isEnabledForStage(stage string, value string) bool {
+ if enabled, err := strconv.ParseBool(value); err == nil {
+ return enabled
+ }
+
+ vv := strings.Split(value, ",")
+ for _, v := range vv {
+ if v == stage {
+ return true
+ }
+ }
+ return false
+}
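
The convert.go hunks above thread a per-build epoch (`d.epoch`) into `commitToHistory` and add `clampTimes`, so history entries and the image `Created` field never post-date that epoch; `isHTTPSource` and `isEnabledForStage` are small predicates introduced here for use by other hunks. A minimal, self-contained sketch of the clamping rule, assuming the epoch arrives as a SOURCE_DATE_EPOCH-style seconds value (the helper names below are illustrative, not the frontend's own):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

type historyEntry struct {
	CreatedBy string
	Created   *time.Time
}

// parseEpoch turns a SOURCE_DATE_EPOCH-style value (seconds since the Unix
// epoch) into the *time.Time form that the clamping step expects.
func parseEpoch(v string) (*time.Time, error) {
	sec, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return nil, err
	}
	t := time.Unix(sec, 0).UTC()
	return &t, nil
}

// clamp rewrites any timestamp that is unset or newer than the epoch,
// which is the same rule clampTimes applies to history and img.Created.
func clamp(hist []historyEntry, tm *time.Time) {
	if tm == nil {
		return
	}
	for i, h := range hist {
		if h.Created == nil || h.Created.After(*tm) {
			hist[i].Created = tm
		}
	}
}

func main() {
	epoch, err := parseEpoch("1672531200") // illustrative value: 2023-01-01T00:00:00Z
	if err != nil {
		panic(err)
	}
	later := epoch.Add(time.Hour)
	hist := []historyEntry{
		{CreatedBy: "RUN build"},                    // unset -> clamped to epoch
		{CreatedBy: "COPY . /src", Created: &later}, // newer -> clamped to epoch
	}
	clamp(hist, epoch)
	for _, h := range hist {
		fmt.Println(h.CreatedBy, h.Created.Format(time.RFC3339))
	}
}
```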
diff --git a/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go b/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go
new file mode 100644
index 000000000000..4506baeb8ba8
--- /dev/null
+++ b/frontend/dockerfile/dockerfile2llb/convert_addchecksum.go
@@ -0,0 +1,6 @@
+//go:build dfaddchecksum
+// +build dfaddchecksum
+
+package dockerfile2llb
+
+const addChecksumEnabled = true
diff --git a/frontend/dockerfile/dockerfile2llb/convert_addgit.go b/frontend/dockerfile/dockerfile2llb/convert_addgit.go
new file mode 100644
index 000000000000..9ccb7a20e840
--- /dev/null
+++ b/frontend/dockerfile/dockerfile2llb/convert_addgit.go
@@ -0,0 +1,6 @@
+//go:build dfaddgit
+// +build dfaddgit
+
+package dockerfile2llb
+
+const addGitEnabled = true
diff --git a/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go b/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go
new file mode 100644
index 000000000000..8de035297c1b
--- /dev/null
+++ b/frontend/dockerfile/dockerfile2llb/convert_noaddchecksum.go
@@ -0,0 +1,6 @@
+//go:build !dfaddchecksum
+// +build !dfaddchecksum
+
+package dockerfile2llb
+
+const addChecksumEnabled = false
diff --git a/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go b/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go
new file mode 100644
index 000000000000..119bb32c8895
--- /dev/null
+++ b/frontend/dockerfile/dockerfile2llb/convert_noaddgit.go
@@ -0,0 +1,6 @@
+//go:build !dfaddgit
+// +build !dfaddgit
+
+package dockerfile2llb
+
+const addGitEnabled = false
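
The four one-line files above are the entire feature gate for the experimental `ADD --checksum` and `ADD <git ref>` paths: building the frontend with `-tags dfaddchecksum,dfaddgit` compiles in the `true` constants, otherwise the no-op variants apply. A hedged sketch of how such constants are typically consulted; the guard function is illustrative, not the actual call site in convert.go:

```go
package main

import (
	"errors"
	"fmt"
)

// In the real frontend these constants come from the per-build-tag files
// above; they are plain values here so the sketch compiles on its own.
const (
	addGitEnabled      = false
	addChecksumEnabled = false
)

// validateADDFlags rejects ADD flags whose implementation was compiled out.
func validateADDFlags(gitRef bool, checksum string) error {
	if gitRef && !addGitEnabled {
		return errors.New("this frontend was built without dfaddgit; ADD <git ref> is unavailable")
	}
	if checksum != "" && !addChecksumEnabled {
		return errors.New("this frontend was built without dfaddchecksum; ADD --checksum is unavailable")
	}
	return nil
}

func main() {
	fmt.Println(validateADDFlags(true, ""))
}
```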
diff --git a/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/frontend/dockerfile/dockerfile2llb/convert_runmount.go
index 7777fba91ac9..1015590a0dc6 100644
--- a/frontend/dockerfile/dockerfile2llb/convert_runmount.go
+++ b/frontend/dockerfile/dockerfile2llb/convert_runmount.go
@@ -2,12 +2,9 @@ package dockerfile2llb
import (
"context"
- "fmt"
"os"
"path"
"path/filepath"
- "strconv"
- "strings"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
@@ -46,7 +43,7 @@ func detectRunMount(cmd *command, allDispatchStates *dispatchStates) bool {
return false
}
-func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State {
+func setCacheUIDGID(m *instructions.Mount, st llb.State) llb.State {
uid := 0
gid := 0
mode := os.FileMode(0755)
@@ -62,24 +59,6 @@ func setCacheUIDGIDFileOp(m *instructions.Mount, st llb.State) llb.State {
return st.File(llb.Mkdir("/cache", mode, llb.WithUIDGID(uid, gid)), llb.WithCustomName("[internal] settings cache mount permissions"))
}
-func setCacheUIDGID(m *instructions.Mount, st llb.State, fileop bool) llb.State {
- if fileop {
- return setCacheUIDGIDFileOp(m, st)
- }
-
- var b strings.Builder
- if m.UID != nil {
- b.WriteString(fmt.Sprintf("chown %d /mnt/cache;", *m.UID))
- }
- if m.GID != nil {
- b.WriteString(fmt.Sprintf("chown :%d /mnt/cache;", *m.GID))
- }
- if m.Mode != nil {
- b.WriteString(fmt.Sprintf("chmod %s /mnt/cache;", strconv.FormatUint(*m.Mode, 8)))
- }
- return llb.Image("busybox").Run(llb.Shlex(fmt.Sprintf("sh -c 'mkdir -p /mnt/cache;%s'", b.String())), llb.WithCustomName("[internal] settings cache mount permissions")).AddMount("/mnt", st)
-}
-
func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*dispatchState, opt dispatchOpt) ([]llb.RunOption, error) {
var out []llb.RunOption
mounts := instructions.GetMounts(c)
@@ -100,7 +79,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*
))
}
if mount.Type == instructions.MountTypeSecret {
- secret, err := dispatchSecret(mount)
+ secret, err := dispatchSecret(d, mount, c.Location())
if err != nil {
return nil, err
}
@@ -108,7 +87,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*
continue
}
if mount.Type == instructions.MountTypeSSH {
- ssh, err := dispatchSSH(mount)
+ ssh, err := dispatchSSH(d, mount, c.Location())
if err != nil {
return nil, err
}
@@ -148,7 +127,7 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []*
mountOpts = append(mountOpts, llb.SourcePath(src))
} else {
if mount.UID != nil || mount.GID != nil || mount.Mode != nil {
- st = setCacheUIDGID(mount, st, useFileOp(opt.buildArgValues, opt.llbCaps))
+ st = setCacheUIDGID(mount, st)
mountOpts = append(mountOpts, llb.SourcePath("/cache"))
}
}
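
`setCacheUIDGIDFileOp` becomes plain `setCacheUIDGID`, and the busybox-based fallback (along with its `useFileOp` switch) is deleted, so cache-mount ownership is now always expressed as an LLB file operation. A standalone sketch of that approach against the `llb` client package, using illustrative uid/gid/mode values:

```go
package main

import (
	"os"

	"github.com/moby/buildkit/client/llb"
)

// prepareCacheDir shows the fileop-only approach kept by this change: a
// Mkdir on a scratch state carries the uid/gid/mode for the cache mount,
// with no fallback to running chown/chmod inside a busybox container.
func prepareCacheDir(uid, gid int, mode os.FileMode) llb.State {
	return llb.Scratch().File(
		llb.Mkdir("/cache", mode, llb.WithUIDGID(uid, gid)),
		llb.WithCustomName("[internal] settings cache mount permissions"),
	)
}

func main() {
	st := prepareCacheDir(1000, 1000, 0755)
	_ = st // in the frontend this state becomes the source of the cache mount
}
```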
diff --git a/frontend/dockerfile/dockerfile2llb/convert_secrets.go b/frontend/dockerfile/dockerfile2llb/convert_secrets.go
index 2c88a5e4f7e7..ced2bff1b070 100644
--- a/frontend/dockerfile/dockerfile2llb/convert_secrets.go
+++ b/frontend/dockerfile/dockerfile2llb/convert_secrets.go
@@ -5,10 +5,11 @@ import (
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
+ "github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/pkg/errors"
)
-func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) {
+func dispatchSecret(d *dispatchState, m *instructions.Mount, loc []parser.Range) (llb.RunOption, error) {
id := m.CacheID
if m.Source != "" {
id = m.Source
@@ -26,6 +27,13 @@ func dispatchSecret(m *instructions.Mount) (llb.RunOption, error) {
target = "/run/secrets/" + path.Base(id)
}
+ if _, ok := d.outline.secrets[id]; !ok {
+ d.outline.secrets[id] = secretInfo{
+ location: loc,
+ required: m.Required,
+ }
+ }
+
opts := []llb.SecretOption{llb.SecretID(id)}
if !m.Required {
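
`dispatchSecret` now receives the dispatch state and source location so each secret mount can be recorded for the outline: keyed by its resolved id, first use wins, and the `required` flag is preserved. A small standalone sketch of the id/target resolution and capture visible in this hunk, using local stand-in types rather than the frontend's:

```go
package main

import (
	"fmt"
	"path"
)

type secretInfo struct{ required bool }

// registerSecret mirrors the capture added to dispatchSecret: an explicit
// source overrides the mount id, the default target is derived from that id,
// and the first use of an id wins in the outline's map.
func registerSecret(seen map[string]secretInfo, id, source, target string, required bool) (string, string) {
	if source != "" {
		id = source
	}
	if target == "" {
		target = "/run/secrets/" + path.Base(id)
	}
	if _, ok := seen[id]; !ok {
		seen[id] = secretInfo{required: required}
	}
	return id, target
}

func main() {
	seen := map[string]secretInfo{}
	id, target := registerSecret(seen, "mysecret", "", "", true)
	fmt.Println(id, target, seen[id].required) // mysecret /run/secrets/mysecret true
}
```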
diff --git a/frontend/dockerfile/dockerfile2llb/convert_ssh.go b/frontend/dockerfile/dockerfile2llb/convert_ssh.go
index b55659d97883..ab7aaa60127f 100644
--- a/frontend/dockerfile/dockerfile2llb/convert_ssh.go
+++ b/frontend/dockerfile/dockerfile2llb/convert_ssh.go
@@ -3,13 +3,26 @@ package dockerfile2llb
import (
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
+ "github.com/moby/buildkit/frontend/dockerfile/parser"
"github.com/pkg/errors"
)
-func dispatchSSH(m *instructions.Mount) (llb.RunOption, error) {
+func dispatchSSH(d *dispatchState, m *instructions.Mount, loc []parser.Range) (llb.RunOption, error) {
if m.Source != "" {
return nil, errors.Errorf("ssh does not support source")
}
+
+ id := m.CacheID
+ if id == "" {
+ id = "default"
+ }
+ if _, ok := d.outline.ssh[id]; !ok {
+ d.outline.ssh[id] = sshInfo{
+ location: loc,
+ required: m.Required,
+ }
+ }
+
opts := []llb.SSHOption{llb.SSHID(m.CacheID)}
if m.Target != "" {
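
`dispatchSSH` gets the same outline capture; note that the `"default"` fallback only names the entry in the outline, while the actual `llb.SSHID` option still receives `m.CacheID` unchanged. The fallback in isolation:

```go
package main

import "fmt"

// sshOutlineID reflects the fallback applied before recording an ssh mount:
// an unnamed --mount=type=ssh is reported under the id "default".
func sshOutlineID(cacheID string) string {
	if cacheID == "" {
		return "default"
	}
	return cacheID
}

func main() {
	fmt.Println(sshOutlineID(""))       // default
	fmt.Println(sshOutlineID("deploy")) // deploy
}
```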
diff --git a/frontend/dockerfile/dockerfile2llb/convert_test.go b/frontend/dockerfile/dockerfile2llb/convert_test.go
index 5c1817addf91..ffcfee86fc15 100644
--- a/frontend/dockerfile/dockerfile2llb/convert_test.go
+++ b/frontend/dockerfile/dockerfile2llb/convert_test.go
@@ -1,16 +1,12 @@
package dockerfile2llb
import (
- "strings"
"testing"
"github.com/moby/buildkit/frontend/dockerfile/instructions"
"github.com/moby/buildkit/frontend/dockerfile/shell"
"github.com/moby/buildkit/util/appcontext"
- binfotypes "github.com/moby/buildkit/util/buildinfo/types"
- ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
func toEnvMap(args []instructions.KeyValuePairOptional, env []string) map[string]string {
@@ -192,30 +188,3 @@ COPY --from=stage1 f2 /sub/
_, _, _, err = Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{})
assert.EqualError(t, err, "circular dependency detected on stage: stage0")
}
-
-// moby/buildkit#2311
-func TestTargetBuildInfo(t *testing.T) {
- df := `
-FROM busybox
-ADD https://raw.githubusercontent.com/moby/buildkit/master/README.md /
-`
- _, _, bi, err := Dockerfile2LLB(appcontext.Context(), []byte(df), ConvertOpt{
- TargetPlatform: &ocispecs.Platform{
- Architecture: "amd64",
- OS: "linux",
- },
- BuildPlatforms: []ocispecs.Platform{
- {
- Architecture: "amd64",
- OS: "linux",
- },
- },
- })
- require.NoError(t, err)
-
- require.Equal(t, 1, len(bi.Sources))
- assert.Equal(t, binfotypes.SourceTypeDockerImage, bi.Sources[0].Type)
- assert.Equal(t, "busybox", bi.Sources[0].Ref)
- assert.True(t, strings.HasPrefix(bi.Sources[0].Alias, "docker.io/library/busybox@"))
- assert.NotEmpty(t, bi.Sources[0].Pin)
-}
diff --git a/frontend/dockerfile/dockerfile2llb/directives.go b/frontend/dockerfile/dockerfile2llb/directives.go
deleted file mode 100644
index 3cf982b9a9b3..000000000000
--- a/frontend/dockerfile/dockerfile2llb/directives.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package dockerfile2llb
-
-import (
- "bufio"
- "io"
- "regexp"
- "strings"
-
- "github.com/moby/buildkit/frontend/dockerfile/parser"
-)
-
-const keySyntax = "syntax"
-
-var reDirective = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`)
-
-type Directive struct {
- Name string
- Value string
- Location []parser.Range
-}
-
-func DetectSyntax(r io.Reader) (string, string, []parser.Range, bool) {
- directives := ParseDirectives(r)
- if len(directives) == 0 {
- return "", "", nil, false
- }
- v, ok := directives[keySyntax]
- if !ok {
- return "", "", nil, false
- }
- p := strings.SplitN(v.Value, " ", 2)
- return p[0], v.Value, v.Location, true
-}
-
-func ParseDirectives(r io.Reader) map[string]Directive {
- m := map[string]Directive{}
- s := bufio.NewScanner(r)
- var l int
- for s.Scan() {
- l++
- match := reDirective.FindStringSubmatch(s.Text())
- if len(match) == 0 {
- return m
- }
- m[strings.ToLower(match[1])] = Directive{
- Name: match[1],
- Value: match[2],
- Location: []parser.Range{{
- Start: parser.Position{Line: l},
- End: parser.Position{Line: l},
- }},
- }
- }
- return m
-}
diff --git a/frontend/dockerfile/dockerfile2llb/directives_test.go b/frontend/dockerfile/dockerfile2llb/directives_test.go
deleted file mode 100644
index 6f45b2903111..000000000000
--- a/frontend/dockerfile/dockerfile2llb/directives_test.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package dockerfile2llb
-
-import (
- "bytes"
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestDirectives(t *testing.T) {
- t.Parallel()
-
- dt := `#escape=\
-# key = FOO bar
-
-# smth
-`
-
- d := ParseDirectives(bytes.NewBuffer([]byte(dt)))
- require.Equal(t, len(d), 2, fmt.Sprintf("%+v", d))
-
- v, ok := d["escape"]
- require.True(t, ok)
- require.Equal(t, v.Value, "\\")
-
- v, ok = d["key"]
- require.True(t, ok)
- require.Equal(t, v.Value, "FOO bar")
-
- // for some reason Moby implementation in case insensitive for escape
- dt = `# EScape=\
-# KEY = FOO bar
-
-# smth
-`
-
- d = ParseDirectives(bytes.NewBuffer([]byte(dt)))
- require.Equal(t, len(d), 2, fmt.Sprintf("%+v", d))
-
- v, ok = d["escape"]
- require.True(t, ok)
- require.Equal(t, v.Value, "\\")
-
- v, ok = d["key"]
- require.True(t, ok)
- require.Equal(t, v.Value, "FOO bar")
-}
-
-func TestSyntaxDirective(t *testing.T) {
- t.Parallel()
-
- dt := `# syntax = dockerfile:experimental // opts
-FROM busybox
-`
-
- ref, cmdline, loc, ok := DetectSyntax(bytes.NewBuffer([]byte(dt)))
- require.True(t, ok)
- require.Equal(t, ref, "dockerfile:experimental")
- require.Equal(t, cmdline, "dockerfile:experimental // opts")
- require.Equal(t, 1, loc[0].Start.Line)
- require.Equal(t, 1, loc[0].End.Line)
-
- dt = `FROM busybox
-RUN ls
-`
- ref, cmdline, _, ok = DetectSyntax(bytes.NewBuffer([]byte(dt)))
- require.False(t, ok)
- require.Equal(t, ref, "")
- require.Equal(t, cmdline, "")
-}
diff --git a/frontend/dockerfile/dockerfile2llb/image.go b/frontend/dockerfile/dockerfile2llb/image.go
index d4c82700e3e4..36b27aa28aba 100644
--- a/frontend/dockerfile/dockerfile2llb/image.go
+++ b/frontend/dockerfile/dockerfile2llb/image.go
@@ -1,59 +1,14 @@
package dockerfile2llb
import (
- "time"
-
- "github.com/docker/docker/api/types/strslice"
+ "github.com/moby/buildkit/exporter/containerimage/image"
"github.com/moby/buildkit/util/system"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
-// HealthConfig holds configuration settings for the HEALTHCHECK feature.
-type HealthConfig struct {
- // Test is the test to perform to check that the container is healthy.
- // An empty slice means to inherit the default.
- // The options are:
- // {} : inherit healthcheck
- // {"NONE"} : disable healthcheck
- // {"CMD", args...} : exec arguments directly
- // {"CMD-SHELL", command} : run command with system's default shell
- Test []string `json:",omitempty"`
-
- // Zero means to inherit. Durations are expressed as integer nanoseconds.
- Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
- StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
-
- // Retries is the number of consecutive failures needed to consider a container as unhealthy.
- // Zero means inherit.
- Retries int `json:",omitempty"`
-}
-
-// ImageConfig is a docker compatible config for an image
-type ImageConfig struct {
- ocispecs.ImageConfig
-
- Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
- ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
-
- // NetworkDisabled bool `json:",omitempty"` // Is network disabled
- // MacAddress string `json:",omitempty"` // Mac Address of the container
- OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
- StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
- Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
-}
-
// Image is the JSON structure which describes some basic information about the image.
// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
-type Image struct {
- ocispecs.Image
-
- // Config defines the execution parameters which should be used as a base when running a container using the image.
- Config ImageConfig `json:"config,omitempty"`
-
- // Variant defines platform variant. To be added to OCI.
- Variant string `json:"variant,omitempty"`
-}
+type Image image.Image
func clone(src Image) Image {
img := src
@@ -69,8 +24,8 @@ func emptyImage(platform ocispecs.Platform) Image {
Image: ocispecs.Image{
Architecture: platform.Architecture,
OS: platform.OS,
+ Variant: platform.Variant,
},
- Variant: platform.Variant,
}
img.RootFS.Type = "layers"
img.Config.WorkingDir = "/"
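
The duplicated `HealthConfig`/`ImageConfig` definitions are dropped and `Image` becomes a named type over the shared `exporter/containerimage/image.Image`, with the platform variant now carried by the embedded OCI struct. One practical consequence, sketched below, is that moving between the two types is a plain conversion; the field access is illustrative:

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/exporter/containerimage/image"
)

// Image mirrors the named type introduced above; values convert freely in
// both directions because the underlying struct type is identical.
type Image image.Image

func main() {
	var img Image
	img.Author = "example"     // fields come straight from image.Image
	shared := image.Image(img) // no copying logic needed in either direction
	fmt.Println(shared.Author)
}
```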
diff --git a/frontend/dockerfile/dockerfile2llb/outline.go b/frontend/dockerfile/dockerfile2llb/outline.go
new file mode 100644
index 000000000000..f93c8961b2ec
--- /dev/null
+++ b/frontend/dockerfile/dockerfile2llb/outline.go
@@ -0,0 +1,210 @@
+package dockerfile2llb
+
+import (
+ "sort"
+
+ "github.com/moby/buildkit/frontend/dockerfile/instructions"
+ "github.com/moby/buildkit/frontend/dockerfile/parser"
+ "github.com/moby/buildkit/frontend/subrequests/outline"
+ pb "github.com/moby/buildkit/solver/pb"
+)
+
+type outlineCapture struct {
+ allArgs map[string]argInfo
+ usedArgs map[string]struct{}
+ secrets map[string]secretInfo
+ ssh map[string]sshInfo
+}
+
+type argInfo struct {
+ value string
+ definition instructions.KeyValuePairOptional
+ deps map[string]struct{}
+ location []parser.Range
+}
+
+type secretInfo struct {
+ required bool
+ location []parser.Range
+}
+
+type sshInfo struct {
+ required bool
+ location []parser.Range
+}
+
+func newOutlineCapture() outlineCapture {
+ return outlineCapture{
+ allArgs: map[string]argInfo{},
+ usedArgs: map[string]struct{}{},
+ secrets: map[string]secretInfo{},
+ ssh: map[string]sshInfo{},
+ }
+}
+
+func (o outlineCapture) clone() outlineCapture {
+ allArgs := map[string]argInfo{}
+ for k, v := range o.allArgs {
+ allArgs[k] = v
+ }
+ usedArgs := map[string]struct{}{}
+ for k := range o.usedArgs {
+ usedArgs[k] = struct{}{}
+ }
+ secrets := map[string]secretInfo{}
+ for k, v := range o.secrets {
+ secrets[k] = v
+ }
+ ssh := map[string]sshInfo{}
+ for k, v := range o.ssh {
+ ssh[k] = v
+ }
+ return outlineCapture{
+ allArgs: allArgs,
+ usedArgs: usedArgs,
+ secrets: secrets,
+ ssh: ssh,
+ }
+}
+
+func (o outlineCapture) markAllUsed(in map[string]struct{}) {
+ for k := range in {
+ if a, ok := o.allArgs[k]; ok {
+ o.markAllUsed(a.deps)
+ }
+ o.usedArgs[k] = struct{}{}
+ }
+}
+
+func (ds *dispatchState) args(visited map[string]struct{}) []outline.Arg {
+ ds.outline.markAllUsed(ds.outline.usedArgs)
+
+ args := make([]outline.Arg, 0, len(ds.outline.usedArgs))
+ for k := range ds.outline.usedArgs {
+ if a, ok := ds.outline.allArgs[k]; ok {
+ if _, ok := visited[k]; !ok {
+ args = append(args, outline.Arg{
+ Name: a.definition.Key,
+ Value: a.value,
+ Description: a.definition.Comment,
+ Location: toSourceLocation(a.location),
+ })
+ visited[k] = struct{}{}
+ }
+ }
+ }
+
+ if ds.base != nil {
+ args = append(args, ds.base.args(visited)...)
+ }
+ for d := range ds.deps {
+ args = append(args, d.args(visited)...)
+ }
+
+ return args
+}
+
+func (ds *dispatchState) secrets(visited map[string]struct{}) []outline.Secret {
+ secrets := make([]outline.Secret, 0, len(ds.outline.secrets))
+ for k, v := range ds.outline.secrets {
+ if _, ok := visited[k]; !ok {
+ secrets = append(secrets, outline.Secret{
+ Name: k,
+ Required: v.required,
+ Location: toSourceLocation(v.location),
+ })
+ visited[k] = struct{}{}
+ }
+ }
+ if ds.base != nil {
+ secrets = append(secrets, ds.base.secrets(visited)...)
+ }
+ for d := range ds.deps {
+ secrets = append(secrets, d.secrets(visited)...)
+ }
+ return secrets
+}
+
+func (ds *dispatchState) ssh(visited map[string]struct{}) []outline.SSH {
+ ssh := make([]outline.SSH, 0, len(ds.outline.ssh))
+ for k, v := range ds.outline.ssh {
+ if _, ok := visited[k]; !ok {
+ ssh = append(ssh, outline.SSH{
+ Name: k,
+ Required: v.required,
+ Location: toSourceLocation(v.location),
+ })
+ visited[k] = struct{}{}
+ }
+ }
+ if ds.base != nil {
+ ssh = append(ssh, ds.base.ssh(visited)...)
+ }
+ for d := range ds.deps {
+ ssh = append(ssh, d.ssh(visited)...)
+ }
+ return ssh
+}
+
+func (ds *dispatchState) Outline(dt []byte) outline.Outline {
+ args := ds.args(map[string]struct{}{})
+ sort.Slice(args, func(i, j int) bool {
+ return compLocation(args[i].Location, args[j].Location)
+ })
+
+ secrets := ds.secrets(map[string]struct{}{})
+ sort.Slice(secrets, func(i, j int) bool {
+ return compLocation(secrets[i].Location, secrets[j].Location)
+ })
+
+ ssh := ds.ssh(map[string]struct{}{})
+ sort.Slice(ssh, func(i, j int) bool {
+ return compLocation(ssh[i].Location, ssh[j].Location)
+ })
+
+ out := outline.Outline{
+ Name: ds.stage.Name,
+ Description: ds.stage.Comment,
+ Sources: [][]byte{dt},
+ Args: args,
+ Secrets: secrets,
+ SSH: ssh,
+ }
+
+ return out
+}
+
+func toSourceLocation(r []parser.Range) *pb.Location {
+ if len(r) == 0 {
+ return nil
+ }
+ arr := make([]*pb.Range, len(r))
+ for i, r := range r {
+ arr[i] = &pb.Range{
+ Start: pb.Position{
+ Line: int32(r.Start.Line),
+ Character: int32(r.Start.Character),
+ },
+ End: pb.Position{
+ Line: int32(r.End.Line),
+ Character: int32(r.End.Character),
+ },
+ }
+ }
+ return &pb.Location{Ranges: arr}
+}
+
+func compLocation(a, b *pb.Location) bool {
+ if a.SourceIndex != b.SourceIndex {
+ return a.SourceIndex < b.SourceIndex
+ }
+ linea := 0
+ lineb := 0
+ if len(a.Ranges) > 0 {
+ linea = int(a.Ranges[0].Start.Line)
+ }
+ if len(b.Ranges) > 0 {
+ lineb = int(b.Ranges[0].Start.Line)
+ }
+ return linea < lineb
+}
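
The new outline.go backs the outline subrequest: each stage accumulates the ARGs, secrets, and SSH mounts it saw, and `Outline` walks the current stage, then its base, then its dependencies, deduplicating by name with a visited map before sorting by source location. A stripped-down sketch of that walk over a toy two-stage graph, with local stand-in types:

```go
package main

import (
	"fmt"
	"sort"
)

type stage struct {
	name string
	args map[string]int // name -> defining line, standing in for argInfo
	base *stage
	deps []*stage
}

type arg struct {
	name string
	line int
}

// collect mirrors the recursive walk in outline.go: the current stage first,
// then its base stage, then any dependency stages, with a visited set so each
// arg name is emitted only once.
func collect(s *stage, visited map[string]struct{}) []arg {
	var out []arg
	for name, line := range s.args {
		if _, ok := visited[name]; !ok {
			out = append(out, arg{name, line})
			visited[name] = struct{}{}
		}
	}
	if s.base != nil {
		out = append(out, collect(s.base, visited)...)
	}
	for _, d := range s.deps {
		out = append(out, collect(d, visited)...)
	}
	return out
}

func main() {
	base := &stage{name: "base", args: map[string]int{"VERSION": 1}}
	build := &stage{name: "build", args: map[string]int{"VERSION": 4, "TARGET": 6}, base: base}
	args := collect(build, map[string]struct{}{})
	sort.Slice(args, func(i, j int) bool { return args[i].line < args[j].line }) // compLocation analogue
	for _, a := range args {
		fmt.Println(a.name, a.line)
	}
}
```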
diff --git a/frontend/dockerfile/dockerfile_addchecksum_test.go b/frontend/dockerfile/dockerfile_addchecksum_test.go
new file mode 100644
index 000000000000..f34cf31a5038
--- /dev/null
+++ b/frontend/dockerfile/dockerfile_addchecksum_test.go
@@ -0,0 +1,175 @@
+//go:build dfaddchecksum
+// +build dfaddchecksum
+
+package dockerfile
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/containerd/continuity/fs/fstest"
+ "github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/frontend/dockerfile/builder"
+ "github.com/moby/buildkit/identity"
+ "github.com/moby/buildkit/util/testutil/httpserver"
+ "github.com/moby/buildkit/util/testutil/integration"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/stretchr/testify/require"
+)
+
+var addChecksumTests = integration.TestFuncs(
+ testAddChecksum,
+)
+
+func init() {
+ allTests = append(allTests, addChecksumTests...)
+}
+
+func testAddChecksum(t *testing.T, sb integration.Sandbox) {
+ f := getFrontend(t, sb)
+ f.RequiresBuildctl(t)
+
+ resp := httpserver.Response{
+ Etag: identity.NewID(),
+ Content: []byte("content1"),
+ }
+ server := httpserver.NewTestServer(map[string]httpserver.Response{
+ "/foo": resp,
+ })
+ defer server.Close()
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ t.Run("Valid", func(t *testing.T) {
+ dockerfile := []byte(fmt.Sprintf(`
+FROM scratch
+ADD --checksum=%s %s /tmp/foo
+`, digest.FromBytes(resp.Content).String(), server.URL+"/foo"))
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.NoError(t, err)
+ })
+ t.Run("DigestFromEnv", func(t *testing.T) {
+ dockerfile := []byte(fmt.Sprintf(`
+FROM scratch
+ENV DIGEST=%s
+ENV LINK=%s
+ADD --checksum=${DIGEST} ${LINK} /tmp/foo
+`, digest.FromBytes(resp.Content).String(), server.URL+"/foo"))
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.NoError(t, err)
+ })
+ t.Run("DigestMismatch", func(t *testing.T) {
+ dockerfile := []byte(fmt.Sprintf(`
+FROM scratch
+ADD --checksum=%s %s /tmp/foo
+`, digest.FromBytes(nil).String(), server.URL+"/foo"))
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.Error(t, err, "digest mismatch")
+ })
+ t.Run("DigestWithKnownButUnsupportedAlgoName", func(t *testing.T) {
+ dockerfile := []byte(fmt.Sprintf(`
+FROM scratch
+ADD --checksum=md5:7e55db001d319a94b0b713529a756623 %s /tmp/foo
+`, server.URL+"/foo"))
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.Error(t, err, "unsupported digest algorithm")
+ })
+ t.Run("DigestWithUnknownAlgoName", func(t *testing.T) {
+ dockerfile := []byte(fmt.Sprintf(`
+FROM scratch
+ADD --checksum=unknown:%s %s /tmp/foo
+`, digest.FromBytes(resp.Content).Encoded(), server.URL+"/foo"))
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.Error(t, err, "unsupported digest algorithm")
+ })
+ t.Run("DigestWithoutAlgoName", func(t *testing.T) {
+ dockerfile := []byte(fmt.Sprintf(`
+FROM scratch
+ADD --checksum=%s %s /tmp/foo
+`, digest.FromBytes(resp.Content).Encoded(), server.URL+"/foo"))
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.Error(t, err, "invalid checksum digest format")
+ })
+ t.Run("NonHTTPSource", func(t *testing.T) {
+ foo := []byte("local file")
+ dockerfile := []byte(fmt.Sprintf(`
+FROM scratch
+ADD --checksum=%s foo /tmp/foo
+`, digest.FromBytes(foo).String()))
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("foo", foo, 0600),
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.Error(t, err, "checksum can't be specified for non-HTTP sources")
+ })
+}
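
The test builds a matrix around `ADD --checksum=<digest> <http-url> <dest>`: a matching, algorithm-qualified digest succeeds (also when digest and URL are substituted from ENV), while mismatches, `md5:` and unknown algorithms, bare hex strings, and non-HTTP sources all fail. A tiny sketch of producing such a pinned ADD line with `go-digest`, as the test's Sprintf calls do:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	content := []byte("content1") // the bytes the test's HTTP server returns
	sum := digest.FromBytes(content)

	// Only a fully qualified digest is accepted; "md5:...", an unknown
	// algorithm, or a bare hex string are all rejected by the tests above.
	fmt.Printf("ADD --checksum=%s https://example.com/foo /tmp/foo\n", sum.String())
}
```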
diff --git a/frontend/dockerfile/dockerfile_addgit_test.go b/frontend/dockerfile/dockerfile_addgit_test.go
new file mode 100644
index 000000000000..fa99dea5648c
--- /dev/null
+++ b/frontend/dockerfile/dockerfile_addgit_test.go
@@ -0,0 +1,115 @@
+//go:build dfaddgit
+// +build dfaddgit
+
+package dockerfile
+
+import (
+ "bytes"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "testing"
+ "text/template"
+
+ "github.com/containerd/continuity/fs/fstest"
+ "github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/frontend/dockerfile/builder"
+ "github.com/moby/buildkit/util/testutil/integration"
+ "github.com/stretchr/testify/require"
+)
+
+var addGitTests = integration.TestFuncs(
+ testAddGit,
+)
+
+func init() {
+ allTests = append(allTests, addGitTests...)
+}
+
+func testAddGit(t *testing.T, sb integration.Sandbox) {
+ f := getFrontend(t, sb)
+
+ gitDir, err := os.MkdirTemp("", "buildkit")
+ require.NoError(t, err)
+ defer os.RemoveAll(gitDir)
+ gitCommands := []string{
+ "git init",
+ "git config --local user.email test",
+ "git config --local user.name test",
+ }
+ makeCommit := func(tag string) []string {
+ return []string{
+ "echo foo of " + tag + " >foo",
+ "git add foo",
+ "git commit -m " + tag,
+ "git tag " + tag,
+ }
+ }
+ gitCommands = append(gitCommands, makeCommit("v0.0.1")...)
+ gitCommands = append(gitCommands, makeCommit("v0.0.2")...)
+ gitCommands = append(gitCommands, makeCommit("v0.0.3")...)
+ gitCommands = append(gitCommands, "git update-server-info")
+ err = runShell(gitDir, gitCommands...)
+ require.NoError(t, err)
+
+ server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir))))
+ defer server.Close()
+ serverURL := server.URL
+ t.Logf("serverURL=%q", serverURL)
+
+ dockerfile, err := applyTemplate(`
+FROM alpine
+
+# Basic case
+ADD {{.ServerURL}}/.git#v0.0.1 /x
+RUN cd /x && \
+ [ "$(cat foo)" = "foo of v0.0.1" ]
+
+# Complicated case
+ARG REPO="{{.ServerURL}}/.git"
+ARG TAG="v0.0.2"
+ADD --keep-git-dir=true --chown=4242:8484 ${REPO}#${TAG} /buildkit-chowned
+RUN apk add git
+USER 4242
+RUN cd /buildkit-chowned && \
+ [ "$(cat foo)" = "foo of v0.0.2" ] && \
+ [ "$(stat -c %u foo)" = "4242" ] && \
+ [ "$(stat -c %g foo)" = "8484" ] && \
+ [ -z "$(git status -s)" ]
+`, map[string]string{
+ "ServerURL": serverURL,
+ })
+ require.NoError(t, err)
+ t.Logf("dockerfile=%s", dockerfile)
+
+ dir, err := integration.Tmpdir(t,
+ fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
+ )
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.NoError(t, err)
+}
+
+func applyTemplate(tmpl string, x interface{}) (string, error) {
+ var buf bytes.Buffer
+ parsed, err := template.New("").Parse(tmpl)
+ if err != nil {
+ return "", err
+ }
+ if err := parsed.Execute(&buf, x); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
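
Serving the fixture repository relies on git's "dumb" HTTP protocol: after `git update-server-info`, a plain `http.FileServer` over the repo directory is enough for `ADD <url>/.git#<tag>` to fetch from it. A self-contained sketch of that setup; it assumes `git` is on PATH, and the file contents and tag are illustrative:

```go
package main

import (
	"log"
	"net/http"
	"net/http/httptest"
	"os"
	"os/exec"
)

// run executes a command inside dir, mirroring the test's runShell helper.
func run(dir, name string, args ...string) {
	cmd := exec.Command(name, args...)
	cmd.Dir = dir
	if out, err := cmd.CombinedOutput(); err != nil {
		log.Fatalf("%s %v: %v\n%s", name, args, err, out)
	}
}

func main() {
	dir, err := os.MkdirTemp("", "addgit")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	run(dir, "git", "init")
	run(dir, "git", "config", "--local", "user.email", "test")
	run(dir, "git", "config", "--local", "user.name", "test")
	run(dir, "sh", "-c", "echo hello >foo && git add foo && git commit -m v0.0.1 && git tag v0.0.1")
	// The dumb protocol only needs static files once refs are advertised,
	// so update-server-info plus a file server is sufficient.
	run(dir, "git", "update-server-info")

	srv := httptest.NewServer(http.FileServer(http.Dir(dir)))
	defer srv.Close()

	// This is the form the Dockerfile in the test feeds to ADD:
	log.Printf("ADD --keep-git-dir=true %s/.git#v0.0.1 /x", srv.URL)
}
```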
diff --git a/frontend/dockerfile/dockerfile_buildinfo_test.go b/frontend/dockerfile/dockerfile_buildinfo_test.go
index 84e0c24622d5..ea3705920972 100644
--- a/frontend/dockerfile/dockerfile_buildinfo_test.go
+++ b/frontend/dockerfile/dockerfile_buildinfo_test.go
@@ -5,7 +5,7 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/http/httptest"
"os"
@@ -47,18 +47,16 @@ func testBuildInfoSources(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
- gitDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(gitDir)
+ gitDir := t.TempDir()
dockerfile := `
FROM alpine:latest@sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300 AS alpine
FROM busybox:latest
-ADD https://raw.githubusercontent.com/moby/moby/master/README.md /
+ADD https://user2:pw2@raw.githubusercontent.com/moby/moby/v20.10.21/README.md /
COPY --from=alpine /bin/busybox /alpine-busybox
`
- err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
+ err := os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
require.NoError(t, err)
err = runShell(gitDir,
@@ -75,27 +73,35 @@ COPY --from=alpine /bin/busybox /alpine-busybox
server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir))))
defer server.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
-
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- res, err := f.Solve(sb.Context(), c, client.SolveOpt{
- Exports: []client.ExportEntry{
- {
- Type: client.ExporterOCI,
- Output: fixedWriteCloser(outW),
+ var exports []client.ExportEntry
+ if integration.IsTestDockerdMoby(sb) {
+ exports = []client.ExportEntry{{
+ Type: "moby",
+ Attrs: map[string]string{
+ "name": "reg.dummy:5000/buildkit/test:latest",
},
- },
+ }}
+ } else {
+ exports = []client.ExportEntry{{
+ Type: client.ExporterOCI,
+ Attrs: map[string]string{},
+ Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+ }}
+ }
+
+ expectedURL := strings.Replace(server.URL, "http://", "http://xxxxx:xxxxx@", 1)
+ require.NotEqual(t, expectedURL, server.URL)
+ server.URL = strings.Replace(server.URL, "http://", "http://user:pass@", 1)
+
+ res, err := f.Solve(sb.Context(), c, client.SolveOpt{
+ Exports: exports,
FrontendAttrs: map[string]string{
- builder.DefaultLocalNameContext: server.URL + "/.git#buildinfo",
+ builder.DefaultLocalNameContext: server.URL + "/.git#buildinfo",
+ builder.DefaultLocalNameContext + ":foo": "https://foo:bar@example.invalid/foo.html",
},
}, nil)
require.NoError(t, err)
@@ -109,11 +115,20 @@ COPY --from=alpine /bin/busybox /alpine-busybox
require.NoError(t, err)
require.Contains(t, bi.Attrs, "context")
- require.Equal(t, server.URL+"/.git#buildinfo", *bi.Attrs["context"])
+ require.Equal(t, expectedURL+"/.git#buildinfo", *bi.Attrs["context"])
- sources := bi.Sources
- require.Equal(t, 3, len(sources))
+ require.Equal(t, "https://xxxxx:xxxxx@example.invalid/foo.html", *bi.Attrs["context:foo"])
+
+ _, isGateway := f.(*gatewayFrontend)
+ sources := bi.Sources
+ if isGateway {
+ require.Equal(t, 5, len(sources), "%+v", sources)
+ assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type)
+ assert.Contains(t, sources[0].Ref, "buildkit_test")
+ sources = sources[1:]
+ }
+ require.Equal(t, 4, len(sources), "%+v", sources)
assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type)
assert.Equal(t, "docker.io/library/alpine:latest@sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300", sources[0].Ref)
assert.Equal(t, "sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300", sources[0].Pin)
@@ -122,9 +137,13 @@ COPY --from=alpine /bin/busybox /alpine-busybox
assert.Equal(t, "docker.io/library/busybox:latest", sources[1].Ref)
assert.NotEmpty(t, sources[1].Pin)
- assert.Equal(t, binfotypes.SourceTypeHTTP, sources[2].Type)
- assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/master/README.md", sources[2].Ref)
- assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", sources[2].Pin)
+ assert.Equal(t, binfotypes.SourceTypeGit, sources[2].Type)
+ assert.Equal(t, expectedURL+"/.git#buildinfo", sources[2].Ref)
+ assert.NotEmpty(t, sources[2].Pin)
+
+ assert.Equal(t, binfotypes.SourceTypeHTTP, sources[3].Type)
+ assert.Equal(t, "https://xxxxx:xxxxx@raw.githubusercontent.com/moby/moby/v20.10.21/README.md", sources[3].Ref)
+ assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", sources[3].Pin)
}
func testBuildInfoSourcesNoop(t *testing.T, sb integration.Sandbox) {
@@ -135,31 +154,34 @@ func testBuildInfoSourcesNoop(t *testing.T, sb integration.Sandbox) {
FROM busybox:latest
`
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
+ var exports []client.ExportEntry
+ if integration.IsTestDockerdMoby(sb) {
+ exports = []client.ExportEntry{{
+ Type: "moby",
+ Attrs: map[string]string{
+ "name": "reg.dummy:5000/buildkit/test:latest",
+ },
+ }}
+ } else {
+ exports = []client.ExportEntry{{
+ Type: client.ExporterOCI,
+ Attrs: map[string]string{},
+ Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+ }}
+ }
res, err := f.Solve(sb.Context(), c, client.SolveOpt{
- Exports: []client.ExportEntry{
- {
- Type: client.ExporterOCI,
- Output: fixedWriteCloser(outW),
- },
- },
+ Exports: exports,
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -176,8 +198,12 @@ FROM busybox:latest
require.NoError(t, err)
sources := bi.Sources
- require.Equal(t, 1, len(sources))
+ if _, isGateway := f.(*gatewayFrontend); isGateway {
+ require.Equal(t, 2, len(sources), "%+v", sources)
+ sources = sources[1:]
+ }
+ require.Equal(t, 1, len(sources))
assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type)
assert.Equal(t, "docker.io/library/busybox:latest", sources[0].Ref)
assert.NotEmpty(t, sources[0].Pin)
@@ -194,34 +220,37 @@ ARG foo
RUN echo $foo
`
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
+ var exports []client.ExportEntry
+ if integration.IsTestDockerdMoby(sb) {
+ exports = []client.ExportEntry{{
+ Type: "moby",
+ Attrs: map[string]string{
+ "name": "reg.dummy:5000/buildkit/test:latest",
+ },
+ }}
+ } else {
+ exports = []client.ExportEntry{{
+ Type: client.ExporterOCI,
+ Attrs: map[string]string{},
+ Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+ }}
+ }
res, err := f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
"build-arg:foo": "bar",
},
- Exports: []client.ExportEntry{
- {
- Type: client.ExporterOCI,
- Output: fixedWriteCloser(outW),
- },
- },
+ Exports: exports,
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -243,6 +272,7 @@ RUN echo $foo
// moby/buildkit#2476
func testBuildInfoMultiPlatform(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureMultiPlatform)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -250,27 +280,19 @@ func testBuildInfoMultiPlatform(t *testing.T, sb integration.Sandbox) {
FROM busybox:latest
ARG foo
RUN echo $foo
-ADD https://raw.githubusercontent.com/moby/moby/master/README.md /
+ADD https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md /
`
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
-
platforms := []string{"linux/amd64", "linux/arm64"}
res, err := f.Solve(sb.Context(), c, client.SolveOpt{
@@ -281,7 +303,7 @@ ADD https://raw.githubusercontent.com/moby/moby/master/README.md /
Exports: []client.ExportEntry{
{
Type: client.ExporterOCI,
- Output: fixedWriteCloser(outW),
+ Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
},
},
LocalDirs: map[string]string{
@@ -303,15 +325,21 @@ ADD https://raw.githubusercontent.com/moby/moby/master/README.md /
require.Contains(t, bi.Attrs, "build-arg:foo")
require.Equal(t, "bar", *bi.Attrs["build-arg:foo"])
+ _, isGateway := f.(*gatewayFrontend)
+
sources := bi.Sources
- require.Equal(t, 2, len(sources))
+ if isGateway {
+ require.Equal(t, 3, len(sources), "%+v", sources)
+ sources = sources[1:]
+ }
+ require.Equal(t, 2, len(sources), "%+v", sources)
assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type)
assert.Equal(t, "docker.io/library/busybox:latest", sources[0].Ref)
assert.NotEmpty(t, sources[0].Pin)
assert.Equal(t, binfotypes.SourceTypeHTTP, sources[1].Type)
- assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/master/README.md", sources[1].Ref)
+ assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", sources[1].Ref)
assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", sources[1].Pin)
}
}
@@ -327,35 +355,38 @@ FROM scratch
COPY --from=base /out /
`
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
+ var exports []client.ExportEntry
+ if integration.IsTestDockerdMoby(sb) {
+ exports = []client.ExportEntry{{
+ Type: "moby",
+ Attrs: map[string]string{
+ "name": "reg.dummy:5000/buildkit/test:latest",
+ },
+ }}
+ } else {
+ exports = []client.ExportEntry{{
+ Type: client.ExporterOCI,
+ Attrs: map[string]string{},
+ Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+ }}
+ }
res, err := f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
"build-arg:foo": "bar",
"context:busybox": "docker-image://alpine",
},
- Exports: []client.ExportEntry{
- {
- Type: client.ExporterOCI,
- Output: fixedWriteCloser(outW),
- },
- },
+ Exports: exports,
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -376,8 +407,15 @@ COPY --from=base /out /
require.Contains(t, bi.Attrs, "build-arg:foo")
require.Equal(t, "bar", *bi.Attrs["build-arg:foo"])
+ _, isGateway := f.(*gatewayFrontend)
+
sources := bi.Sources
- require.Equal(t, 1, len(sources))
+ if isGateway {
+ require.Equal(t, 2, len(sources), "%+v", sources)
+ sources = sources[1:]
+ } else {
+ require.Equal(t, 1, len(sources))
+ }
assert.Equal(t, binfotypes.SourceTypeDockerImage, sources[0].Type)
assert.Equal(t, "docker.io/library/alpine:latest", sources[0].Ref)
assert.NotEmpty(t, sources[0].Pin)
@@ -394,45 +432,48 @@ FROM scratch
COPY --from=base /o* /
`
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- out := filepath.Join(destDir, "out.tar")
- outW, err := os.Create(out)
- require.NoError(t, err)
-
outf := []byte(`dummy-result`)
- dir2, err := tmpdir(
+ dir2, err := integration.Tmpdir(
+ t,
fstest.CreateFile("out", outf, 0600),
fstest.CreateFile("out2", outf, 0600),
fstest.CreateFile(".dockerignore", []byte("out2\n"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir2)
+
+ var exports []client.ExportEntry
+ if integration.IsTestDockerdMoby(sb) {
+ exports = []client.ExportEntry{{
+ Type: "moby",
+ Attrs: map[string]string{
+ "name": "reg.dummy:5000/buildkit/test:latest",
+ },
+ }}
+ } else {
+ exports = []client.ExportEntry{{
+ Type: client.ExporterOCI,
+ Attrs: map[string]string{},
+ Output: fixedWriteCloser(nopWriteCloser{io.Discard}),
+ }}
+ }
res, err := f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
"build-arg:foo": "bar",
"context:base": "local:basedir",
},
- Exports: []client.ExportEntry{
- {
- Type: client.ExporterOCI,
- Output: fixedWriteCloser(outW),
- },
- },
+ Exports: exports,
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -454,10 +495,17 @@ COPY --from=base /o* /
require.Contains(t, bi.Attrs, "build-arg:foo")
require.Equal(t, "bar", *bi.Attrs["build-arg:foo"])
- require.Equal(t, 0, len(bi.Sources))
+ _, isGateway := f.(*gatewayFrontend)
+ if isGateway {
+ require.Equal(t, 1, len(bi.Sources))
+ } else {
+ require.Equal(t, 0, len(bi.Sources))
+ }
}
func testBuildInfoDeps(t *testing.T, sb integration.Sandbox) {
+ t.Skip("deps temporarily disabled with SLSA provenance support")
+
ctx := sb.Context()
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -469,15 +517,15 @@ func testBuildInfoDeps(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`
FROM alpine
ENV FOO=bar
-ADD https://raw.githubusercontent.com/moby/moby/master/README.md /
+ADD https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md /
RUN echo first > /out
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
dockerfile2 := []byte(`
FROM base AS build
@@ -486,11 +534,11 @@ FROM busybox
COPY --from=build /foo /out /
`)
- dir2, err := tmpdir(
+ dir2, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile2, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{})
@@ -545,9 +593,7 @@ COPY --from=build /foo /out /
return res, nil
}
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
res, err := c.Build(ctx, client.SolveOpt{
LocalDirs: map[string]string{
@@ -582,7 +628,7 @@ COPY --from=build /foo /out /
assert.NotEmpty(t, bi.Sources[0].Pin)
assert.Equal(t, binfotypes.SourceTypeHTTP, bi.Sources[1].Type)
- assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/master/README.md", bi.Sources[1].Ref)
+ assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", bi.Sources[1].Ref)
assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", bi.Sources[1].Pin)
require.Contains(t, bi.Deps, "base")
@@ -594,6 +640,8 @@ COPY --from=build /foo /out /
}
func testBuildInfoDepsMultiPlatform(t *testing.T, sb integration.Sandbox) {
+ t.Skip("deps temporarily disabled with SLSA provenance support")
+
ctx := sb.Context()
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -611,11 +659,11 @@ ENV FOO=bar-$TARGETARCH
RUN echo "foo $TARGETARCH" > /out
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
dockerfile2 := []byte(`
FROM base AS build
@@ -624,11 +672,11 @@ FROM busybox
COPY --from=build /foo /out /
`)
- dir2, err := tmpdir(
+ dir2, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile2, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{
@@ -691,9 +739,7 @@ COPY --from=build /foo /out /
return res, nil
}
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
res, err := c.Build(ctx, client.SolveOpt{
LocalDirs: map[string]string{
@@ -737,6 +783,8 @@ COPY --from=build /foo /out /
}
func testBuildInfoDepsMainNoSource(t *testing.T, sb integration.Sandbox) {
+ t.Skip("deps temporarily disabled with SLSA provenance support")
+
ctx := sb.Context()
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -748,26 +796,26 @@ func testBuildInfoDepsMainNoSource(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`
FROM alpine
ENV FOO=bar
-ADD https://raw.githubusercontent.com/moby/moby/master/README.md /
+ADD https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md /
RUN echo first > /out
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
dockerfile2 := []byte(`
FROM base AS build
RUN echo "foo is $FOO" > /foo
`)
- dir2, err := tmpdir(
+ dir2, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile2, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{})
@@ -822,9 +870,7 @@ RUN echo "foo is $FOO" > /foo
return res, nil
}
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
res, err := c.Build(ctx, client.SolveOpt{
LocalDirs: map[string]string{
@@ -855,7 +901,7 @@ RUN echo "foo is $FOO" > /foo
require.Equal(t, 1, len(bi.Sources))
assert.Equal(t, binfotypes.SourceTypeHTTP, bi.Sources[0].Type)
- assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/master/README.md", bi.Sources[0].Ref)
+ assert.Equal(t, "https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md", bi.Sources[0].Ref)
assert.Equal(t, "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c", bi.Sources[0].Pin)
require.Contains(t, bi.Deps, "base")
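
Most of the churn in this file swaps the OCI tar export for a discard writer (or the `moby` exporter when testing against dockerd) and, more importantly, asserts that credentials embedded in context and ADD URLs show up masked as `xxxxx:xxxxx` in the recorded build sources. A sketch of that masking with `net/url`; this illustrates the expected output, not necessarily the helper BuildKit uses internally:

```go
package main

import (
	"fmt"
	"net/url"
)

// redact masks the userinfo portion of a URL the way the updated buildinfo
// expectations do ("user:pass" becomes "xxxxx:xxxxx"); everything else is
// left intact so the source can still be identified.
func redact(raw string) string {
	u, err := url.Parse(raw)
	if err != nil || u.User == nil {
		return raw
	}
	u.User = url.UserPassword("xxxxx", "xxxxx")
	return u.String()
}

func main() {
	fmt.Println(redact("https://user2:pw2@raw.githubusercontent.com/moby/moby/v20.10.21/README.md"))
	fmt.Println(redact("https://example.invalid/plain.html")) // no userinfo, unchanged
}
```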
diff --git a/frontend/dockerfile/dockerfile_heredoc_test.go b/frontend/dockerfile/dockerfile_heredoc_test.go
index 4be68738e982..cbb386b33e1c 100644
--- a/frontend/dockerfile/dockerfile_heredoc_test.go
+++ b/frontend/dockerfile/dockerfile_heredoc_test.go
@@ -2,7 +2,6 @@ package dockerfile
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -68,19 +67,17 @@ FROM scratch
COPY --from=build /dest /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -104,7 +101,7 @@ COPY --from=build /dest /
}
for name, content := range contents {
- dt, err := ioutil.ReadFile(filepath.Join(destDir, name))
+ dt, err := os.ReadFile(filepath.Join(destDir, name))
require.NoError(t, err)
require.Equal(t, content, string(dt))
}
@@ -141,19 +138,17 @@ COPY <<"EOF" rawslashfile3
EOF
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -169,31 +164,31 @@ EOF
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "quotefile"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "quotefile"))
require.NoError(t, err)
require.Equal(t, "\"quotes in file\"\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "slashfile1"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "slashfile1"))
require.NoError(t, err)
require.Equal(t, "\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "slashfile2"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "slashfile2"))
require.NoError(t, err)
require.Equal(t, "\\\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "slashfile3"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "slashfile3"))
require.NoError(t, err)
require.Equal(t, "$\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "rawslashfile1"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "rawslashfile1"))
require.NoError(t, err)
require.Equal(t, "\\\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "rawslashfile2"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "rawslashfile2"))
require.NoError(t, err)
require.Equal(t, "\\\\\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "rawslashfile3"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "rawslashfile3"))
require.NoError(t, err)
require.Equal(t, "\\$\n", string(dt))
}
@@ -213,19 +208,17 @@ FROM scratch
COPY --from=build /dest /dest
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -241,7 +234,7 @@ COPY --from=build /dest /dest
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "dest"))
require.NoError(t, err)
require.Equal(t, "i am\nroot\n", string(dt))
}
@@ -263,19 +256,17 @@ FROM scratch
COPY --from=build /dest /dest
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -291,7 +282,7 @@ COPY --from=build /dest /dest
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "dest"))
require.NoError(t, err)
require.Equal(t, "foo\n", string(dt))
}
@@ -314,19 +305,17 @@ FROM scratch
COPY --from=build /dest /dest
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -342,7 +331,7 @@ COPY --from=build /dest /dest
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "dest"))
require.NoError(t, err)
require.Equal(t, "hello\nworld\n", string(dt))
}
@@ -379,19 +368,17 @@ FROM scratch
COPY --from=build /dest /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -415,7 +402,7 @@ COPY --from=build /dest /
}
for name, content := range contents {
- dt, err := ioutil.ReadFile(filepath.Join(destDir, name))
+ dt, err := os.ReadFile(filepath.Join(destDir, name))
require.NoError(t, err)
require.Equal(t, content, string(dt))
}
@@ -471,19 +458,17 @@ FROM scratch
COPY --from=build /dest /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -511,7 +496,7 @@ COPY --from=build /dest /
}
for name, content := range contents {
- dt, err := ioutil.ReadFile(filepath.Join(destDir, name))
+ dt, err := os.ReadFile(filepath.Join(destDir, name))
require.NoError(t, err)
require.Equal(t, content, string(dt))
}
@@ -567,19 +552,17 @@ FROM scratch
COPY --from=build /dest /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -610,13 +593,14 @@ COPY --from=build /dest /
}
for name, content := range contents {
- dt, err := ioutil.ReadFile(filepath.Join(destDir, name))
+ dt, err := os.ReadFile(filepath.Join(destDir, name))
require.NoError(t, err)
require.Equal(t, content, string(dt))
}
}
func testOnBuildHeredoc(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
f := getFrontend(t, sb)
registry, err := sb.NewRegistry()
@@ -632,11 +616,11 @@ echo "hello world" >> /dest
EOF
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -660,31 +644,19 @@ EOF
}, nil)
require.NoError(t, err)
- dockerfile = []byte(fmt.Sprintf(`
- FROM %s
- `, target))
-
- dir, err = tmpdir(
- fstest.CreateFile("Dockerfile", dockerfile, 0600),
- )
- require.NoError(t, err)
- defer os.RemoveAll(dir)
-
dockerfile = []byte(fmt.Sprintf(`
FROM %s AS base
FROM scratch
COPY --from=base /dest /dest
`, target))
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -700,7 +672,7 @@ EOF
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "dest"))
require.NoError(t, err)
require.Equal(t, "hello world\n", string(dt))
}
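
The hunks above, and those in the files that follow, apply one mechanical cleanup: the package-local `tmpdir` helper plus manual `defer os.RemoveAll(...)` is replaced by `integration.Tmpdir(t, ...)`, temporary export directories move to `t.TempDir()`, and the deprecated `io/ioutil` calls become their `os` equivalents. Below is a minimal before/after sketch of that pattern, assuming only the helper signatures visible in the hunks themselves; the test body is illustrative, not part of the patch.

```go
// Sketch of the temp-dir cleanup applied across these test files.
// integration.Tmpdir and fstest.CreateFile are used with the signatures
// shown in the hunks above; the assertions here are illustrative only.
package dockerfile

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/containerd/continuity/fs/fstest"
	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/stretchr/testify/require"
)

func testTmpdirPattern(t *testing.T) {
	// Old: dir, err := tmpdir(...) followed by defer os.RemoveAll(dir).
	// New: cleanup is registered on *testing.T, so no defer is needed.
	dir, err := integration.Tmpdir(
		t,
		fstest.CreateFile("Dockerfile", []byte("FROM scratch\n"), 0600),
	)
	require.NoError(t, err)

	// Old: destDir, err := ioutil.TempDir("", "buildkit") plus defer os.RemoveAll(destDir).
	// New: t.TempDir() is removed automatically when the test finishes.
	destDir := t.TempDir()

	// Old: ioutil.WriteFile / ioutil.ReadFile.
	// New: the os package equivalents (io/ioutil is deprecated since Go 1.16).
	require.NoError(t, os.WriteFile(filepath.Join(destDir, "dest"), []byte("ok\n"), 0600))
	dt, err := os.ReadFile(filepath.Join(dir, "Dockerfile"))
	require.NoError(t, err)
	require.Equal(t, "FROM scratch\n", string(dt))
}
```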
diff --git a/frontend/dockerfile/dockerfile_mount_test.go b/frontend/dockerfile/dockerfile_mount_test.go
index 731d5f2d78dc..3a9ec3875119 100644
--- a/frontend/dockerfile/dockerfile_mount_test.go
+++ b/frontend/dockerfile/dockerfile_mount_test.go
@@ -1,10 +1,8 @@
package dockerfile
import (
- "io/ioutil"
"os"
"path/filepath"
- "strconv"
"testing"
"github.com/containerd/continuity/fs/fstest"
@@ -26,12 +24,11 @@ var mountTests = integration.TestFuncs(
testMountFromError,
testMountInvalid,
testMountTmpfsSize,
+ testCacheMountUser,
)
func init() {
allTests = append(allTests, mountTests...)
-
- fileOpTests = append(fileOpTests, integration.TestFuncs(testCacheMountUser)...)
}
func testMountContext(t *testing.T, sb integration.Sandbox) {
@@ -42,12 +39,12 @@ FROM busybox
RUN --mount=target=/context [ "$(cat /context/testfile)" == "contents0" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("testfile", []byte("contents0"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -71,11 +68,11 @@ RUN --mount=target=/mytmp,type=tmpfs touch /mytmp/foo
RUN [ ! -f /mytmp/foo ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -98,11 +95,11 @@ FROM scratch
RUN --mont=target=/mytmp,type=tmpfs /bin/true
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -123,11 +120,11 @@ RUN --mont=target=/mytmp,type=tmpfs /bin/true
RUN --mount=typ=tmpfs /bin/true
`)
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
LocalDirs: map[string]string{
@@ -144,11 +141,11 @@ RUN --mont=target=/mytmp,type=tmpfs /bin/true
RUN --mount=type=tmp /bin/true
`)
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
LocalDirs: map[string]string{
@@ -176,20 +173,18 @@ from scratch
COPY --from=second /unique /unique
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("cachebust", []byte("0"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -205,20 +200,18 @@ COPY --from=second /unique /unique
}, nil)
require.NoError(t, err)
- dt1, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ dt1, err := os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
// repeat with changed file that should be still cached by content
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("cachebust", []byte("1"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -234,34 +227,30 @@ COPY --from=second /unique /unique
}, nil)
require.NoError(t, err)
- dt2, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ dt2, err := os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
require.Equal(t, dt1, dt2)
}
func testCacheMountUser(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox
RUN --mount=type=cache,target=/mycache,uid=1001,gid=1002,mode=0751 [ "$(stat -c "%u %g %f" /mycache)" == "1001 1002 41e9" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -280,11 +269,11 @@ RUN --mount=type=cache,target=/mycache2 [ ! -f /mycache2/foo ]
RUN --mount=type=cache,target=/mycache [ -f /mycache/foo ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -309,11 +298,11 @@ RUN --mount=type=cache,target=/mycache touch /mycache/foo
RUN --mount=type=cache,target=$SOME_PATH [ -f $SOME_PATH/foo ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -338,11 +327,11 @@ RUN --mount=type=$MNT_TYPE,target=/mycache2 touch /mycache2/foo
RUN --mount=type=cache,target=/mycache2 [ -f /mycache2/foo ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -372,11 +361,11 @@ FROM stage1
RUN --mount=type=$MNT_TYPE2,id=$MNT_ID,target=/whatever [ -f /whatever/foo ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -403,11 +392,11 @@ RUN --mount=type=cache,id=mycache,target=/tmp/meta touch /tmp/meta/foo
RUN --mount=type=cache,id=mycache,target=$META_PATH [ -f /tmp/meta/foo ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -434,11 +423,11 @@ ENV ttt=test
RUN --mount=from=$ttt,type=cache,target=/tmp ls
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -464,19 +453,17 @@ FROM scratch
COPY --from=base /tmpfssize /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -492,7 +479,7 @@ COPY --from=base /tmpfssize /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "tmpfssize"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "tmpfssize"))
require.NoError(t, err)
require.Contains(t, string(dt), `size=131072k`)
}
diff --git a/frontend/dockerfile/dockerfile_outline_test.go b/frontend/dockerfile/dockerfile_outline_test.go
new file mode 100644
index 000000000000..346fde14dff7
--- /dev/null
+++ b/frontend/dockerfile/dockerfile_outline_test.go
@@ -0,0 +1,309 @@
+package dockerfile
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "testing"
+
+ "github.com/containerd/continuity/fs/fstest"
+ "github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/frontend/dockerfile/builder"
+ gateway "github.com/moby/buildkit/frontend/gateway/client"
+ "github.com/moby/buildkit/frontend/subrequests"
+ "github.com/moby/buildkit/frontend/subrequests/outline"
+ "github.com/moby/buildkit/util/testutil/integration"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+)
+
+var outlineTests = integration.TestFuncs(
+ testOutlineArgs,
+ testOutlineSecrets,
+ testOutlineDescribeDefinition,
+)
+
+func testOutlineArgs(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendOutline)
+ f := getFrontend(t, sb)
+ if _, ok := f.(*clientFrontend); !ok {
+ t.Skip("only test with client frontend")
+ }
+
+ dockerfile := []byte(`ARG inherited=box
+ARG inherited2=box2
+ARG unused=abc${inherited2}
+# sfx is a suffix
+ARG sfx="usy${inherited}"
+
+FROM b${sfx} AS first
+# this is not assigned to anything
+ARG FOO=123
+# BAR is a number
+ARG BAR=456
+RUN true
+
+FROM alpine${unused} AS second
+ARG BAZ
+RUN true
+
+FROM scratch AS third
+ARG ABC=a
+
+# target defines build target
+FROM third AS target
+COPY --from=first /etc/passwd /
+
+FROM second
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
+ )
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ destDir, err := os.MkdirTemp("", "buildkit")
+ require.NoError(t, err)
+ defer os.RemoveAll(destDir)
+
+ called := false
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res, err := c.Solve(ctx, gateway.SolveRequest{
+ FrontendOpt: map[string]string{
+ "frontend.caps": "moby.buildkit.frontend.subrequests",
+ "requestid": "frontend.outline",
+ "build-arg:BAR": "678",
+ "target": "target",
+ },
+ Frontend: "dockerfile.v0",
+ })
+ require.NoError(t, err)
+
+ outline, err := unmarshalOutline(res)
+ require.NoError(t, err)
+
+ require.Equal(t, "target", outline.Name)
+ require.Equal(t, "defines build target", outline.Description)
+
+ require.Equal(t, 1, len(outline.Sources))
+ require.Equal(t, dockerfile, outline.Sources[0])
+
+ require.Equal(t, 5, len(outline.Args))
+
+ arg := outline.Args[0]
+ require.Equal(t, "inherited", arg.Name)
+ require.Equal(t, "box", arg.Value)
+ require.Equal(t, "", arg.Description)
+ require.Equal(t, int32(0), arg.Location.SourceIndex)
+ require.Equal(t, int32(1), arg.Location.Ranges[0].Start.Line)
+
+ arg = outline.Args[1]
+ require.Equal(t, "sfx", arg.Name)
+ require.Equal(t, "usybox", arg.Value)
+ require.Equal(t, "is a suffix", arg.Description)
+ require.Equal(t, int32(5), arg.Location.Ranges[0].Start.Line)
+
+ arg = outline.Args[2]
+ require.Equal(t, "FOO", arg.Name)
+ require.Equal(t, "123", arg.Value)
+ require.Equal(t, "", arg.Description)
+ require.Equal(t, int32(9), arg.Location.Ranges[0].Start.Line)
+
+ arg = outline.Args[3]
+ require.Equal(t, "BAR", arg.Name)
+ require.Equal(t, "678", arg.Value)
+ require.Equal(t, "is a number", arg.Description)
+
+ arg = outline.Args[4]
+ require.Equal(t, "ABC", arg.Name)
+ require.Equal(t, "a", arg.Value)
+
+ called = true
+ return nil, nil
+ }
+
+ _, err = c.Build(sb.Context(), client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ require.True(t, called)
+}
+
+func testOutlineSecrets(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendOutline)
+ f := getFrontend(t, sb)
+ if _, ok := f.(*clientFrontend); !ok {
+ t.Skip("only test with client frontend")
+ }
+
+ dockerfile := []byte(`
+FROM busybox AS first
+RUN --mount=type=secret,target=/etc/passwd,required=true --mount=type=ssh true
+
+FROM alpine AS second
+RUN --mount=type=secret,id=unused --mount=type=ssh,id=ssh2 true
+
+FROM scratch AS third
+ARG BAR
+RUN --mount=type=secret,id=second${BAR} true
+
+FROM third AS target
+COPY --from=first /foo /
+RUN --mount=type=ssh,id=ssh3,required true
+
+FROM second
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
+ )
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ destDir, err := os.MkdirTemp("", "buildkit")
+ require.NoError(t, err)
+ defer os.RemoveAll(destDir)
+
+ called := false
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res, err := c.Solve(ctx, gateway.SolveRequest{
+ FrontendOpt: map[string]string{
+ "frontend.caps": "moby.buildkit.frontend.subrequests",
+ "requestid": "frontend.outline",
+ "build-arg:BAR": "678",
+ "target": "target",
+ },
+ Frontend: "dockerfile.v0",
+ })
+ require.NoError(t, err)
+
+ outline, err := unmarshalOutline(res)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(outline.Sources))
+ require.Equal(t, dockerfile, outline.Sources[0])
+
+ require.Equal(t, 2, len(outline.Secrets))
+
+ secret := outline.Secrets[0]
+ require.Equal(t, "passwd", secret.Name)
+ require.Equal(t, true, secret.Required)
+ require.Equal(t, int32(0), secret.Location.SourceIndex)
+ require.Equal(t, int32(3), secret.Location.Ranges[0].Start.Line)
+
+ secret = outline.Secrets[1]
+ require.Equal(t, "second678", secret.Name)
+ require.Equal(t, false, secret.Required)
+ require.Equal(t, int32(0), secret.Location.SourceIndex)
+ require.Equal(t, int32(10), secret.Location.Ranges[0].Start.Line)
+
+ require.Equal(t, 2, len(outline.SSH))
+
+ ssh := outline.SSH[0]
+ require.Equal(t, "default", ssh.Name)
+ require.Equal(t, false, ssh.Required)
+ require.Equal(t, int32(0), ssh.Location.SourceIndex)
+ require.Equal(t, int32(3), ssh.Location.Ranges[0].Start.Line)
+
+ ssh = outline.SSH[1]
+ require.Equal(t, "ssh3", ssh.Name)
+ require.Equal(t, true, ssh.Required)
+ require.Equal(t, int32(0), ssh.Location.SourceIndex)
+ require.Equal(t, int32(14), ssh.Location.Ranges[0].Start.Line)
+
+ called = true
+ return nil, nil
+ }
+
+ _, err = c.Build(sb.Context(), client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ require.True(t, called)
+}
+
+func testOutlineDescribeDefinition(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendOutline)
+ f := getFrontend(t, sb)
+ if _, ok := f.(*clientFrontend); !ok {
+ t.Skip("only test with client frontend")
+ }
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ dockerfile := []byte(`
+FROM scratch
+COPY Dockerfile Dockerfile
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ called := false
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ reqs, err := subrequests.Describe(ctx, c)
+ require.NoError(t, err)
+
+ require.True(t, len(reqs) > 0)
+
+ hasOutline := false
+
+ for _, req := range reqs {
+ if req.Name != "frontend.outline" {
+ continue
+ }
+ hasOutline = true
+ require.Equal(t, subrequests.RequestType("rpc"), req.Type)
+ require.NotEqual(t, req.Version, "")
+ }
+ require.True(t, hasOutline)
+
+ called = true
+ return nil, nil
+ }
+
+ _, err = c.Build(sb.Context(), client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ require.True(t, called)
+}
+
+func unmarshalOutline(res *gateway.Result) (*outline.Outline, error) {
+ dt, ok := res.Metadata["result.json"]
+ if !ok {
+ return nil, errors.Errorf("missing frontend.outline")
+ }
+ var o outline.Outline
+ if err := json.Unmarshal(dt, &o); err != nil {
+ return nil, err
+ }
+ return &o, nil
+}
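
The outline subrequest exercised by this new test file follows a fixed round trip: the gateway client issues a `dockerfile.v0` solve with `frontend.caps` set to `moby.buildkit.frontend.subrequests` and `requestid` set to `frontend.outline`, and the frontend answers with JSON under the `result.json` metadata key. A condensed sketch of that round trip, factored out of the calls the test above already makes (the surrounding `c.Build` wiring is unchanged, and the helper name here is illustrative):

```go
// Sketch: issue the frontend.outline subrequest and decode its JSON result,
// using only the calls made in testOutlineArgs above.
package dockerfile

import (
	"context"
	"encoding/json"

	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/frontend/subrequests/outline"
	"github.com/pkg/errors"
)

func fetchOutline(ctx context.Context, c gateway.Client, target string) (*outline.Outline, error) {
	res, err := c.Solve(ctx, gateway.SolveRequest{
		Frontend: "dockerfile.v0",
		FrontendOpt: map[string]string{
			"frontend.caps": "moby.buildkit.frontend.subrequests",
			"requestid":     "frontend.outline",
			// "target" and "build-arg:NAME" refine the outline, as in the test.
			"target": target,
		},
	})
	if err != nil {
		return nil, err
	}
	dt, ok := res.Metadata["result.json"]
	if !ok {
		return nil, errors.Errorf("missing frontend.outline result")
	}
	var o outline.Outline
	if err := json.Unmarshal(dt, &o); err != nil {
		return nil, err
	}
	return &o, nil
}
```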
diff --git a/frontend/dockerfile/dockerfile_provenance_test.go b/frontend/dockerfile/dockerfile_provenance_test.go
new file mode 100644
index 000000000000..835f72a52b86
--- /dev/null
+++ b/frontend/dockerfile/dockerfile_provenance_test.go
@@ -0,0 +1,909 @@
+package dockerfile
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/containerd/containerd/platforms"
+ "github.com/containerd/continuity/fs/fstest"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
+ "github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/client/llb"
+ "github.com/moby/buildkit/exporter/containerimage/exptypes"
+ "github.com/moby/buildkit/frontend/dockerfile/builder"
+ gateway "github.com/moby/buildkit/frontend/gateway/client"
+ "github.com/moby/buildkit/solver/llbsolver/provenance"
+ "github.com/moby/buildkit/util/contentutil"
+ "github.com/moby/buildkit/util/testutil"
+ "github.com/moby/buildkit/util/testutil/integration"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+)
+
+func testProvenanceAttestation(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ dockerfile := []byte(`
+FROM busybox:latest
+RUN echo "ok" > /foo
+`)
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ for _, mode := range []string{"", "min", "max"} {
+ t.Run(mode, func(t *testing.T) {
+ var target string
+ if mode == "" {
+ target = registry + "/buildkit/testwithprovenance:none"
+ } else {
+ target = registry + "/buildkit/testwithprovenance:" + mode
+ }
+
+ provReq := ""
+ if mode != "" {
+ provReq = "mode=" + mode
+ }
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ FrontendAttrs: map[string]string{
+ "attest:provenance": provReq,
+ "build-arg:FOO": "bar",
+ "label:lbl": "abc",
+ "vcs:source": "https://user:pass@example.invalid/repo.git",
+ "vcs:revision": "123456",
+ "filename": "Dockerfile",
+ builder.DefaultLocalNameContext + ":foo": "https://foo:bar@example.invalid/foo.html",
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
+
+ img := imgs.Find(platforms.Format(platforms.Normalize(platforms.DefaultSpec())))
+ require.NotNil(t, img)
+ require.Equal(t, []byte("ok\n"), img.Layers[1]["foo"].Data)
+
+ att := imgs.Find("unknown/unknown")
+ require.NotNil(t, att)
+ require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest))
+ require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest")
+ var attest intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const
+
+ type stmtT struct {
+ Predicate provenance.ProvenancePredicate `json:"predicate"`
+ }
+ var stmt stmtT
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt))
+ pred := stmt.Predicate
+
+ require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType)
+ require.Equal(t, "", pred.Builder.ID)
+
+ require.Equal(t, "", pred.Invocation.ConfigSource.URI)
+
+ _, isClient := f.(*clientFrontend)
+ _, isGateway := f.(*gatewayFrontend)
+
+ args := pred.Invocation.Parameters.Args
+ if isClient {
+ require.Equal(t, "", pred.Invocation.Parameters.Frontend)
+ require.Equal(t, 0, len(args), "%v", args)
+ require.False(t, pred.Metadata.Completeness.Parameters)
+ require.Equal(t, "", pred.Invocation.ConfigSource.EntryPoint)
+ } else if isGateway {
+ require.Equal(t, "gateway.v0", pred.Invocation.Parameters.Frontend)
+
+ if mode == "max" || mode == "" {
+ require.Equal(t, 4, len(args), "%v", args)
+ require.True(t, pred.Metadata.Completeness.Parameters)
+
+ require.Equal(t, "bar", args["build-arg:FOO"])
+ require.Equal(t, "abc", args["label:lbl"])
+ require.Contains(t, args["source"], "buildkit_test/")
+ } else {
+ require.False(t, pred.Metadata.Completeness.Parameters)
+ require.Equal(t, 2, len(args), "%v", args)
+ require.Contains(t, args["source"], "buildkit_test/")
+ }
+ require.Equal(t, "https://xxxxx:xxxxx@example.invalid/foo.html", args["context:foo"])
+ } else {
+ require.Equal(t, "dockerfile.v0", pred.Invocation.Parameters.Frontend)
+
+ if mode == "max" || mode == "" {
+ require.Equal(t, 3, len(args))
+ require.True(t, pred.Metadata.Completeness.Parameters)
+
+ require.Equal(t, "bar", args["build-arg:FOO"])
+ require.Equal(t, "abc", args["label:lbl"])
+ } else {
+ require.False(t, pred.Metadata.Completeness.Parameters)
+ require.Equal(t, 1, len(args), "%v", args)
+ }
+ require.Equal(t, "https://xxxxx:xxxxx@example.invalid/foo.html", args["context:foo"])
+ }
+
+ expectedBase := "pkg:docker/busybox@latest?platform=" + url.PathEscape(platforms.Format(platforms.Normalize(platforms.DefaultSpec())))
+ if isGateway {
+ require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials)
+ require.Contains(t, pred.Materials[0].URI, "docker/buildkit_test")
+ require.Equal(t, expectedBase, pred.Materials[1].URI)
+ require.NotEmpty(t, pred.Materials[1].Digest["sha256"])
+ } else {
+ require.Equal(t, 1, len(pred.Materials), "%+v", pred.Materials)
+ require.Equal(t, expectedBase, pred.Materials[0].URI)
+ require.NotEmpty(t, pred.Materials[0].Digest["sha256"])
+ }
+
+ if !isClient {
+ require.Equal(t, "Dockerfile", pred.Invocation.ConfigSource.EntryPoint)
+ require.Equal(t, "https://xxxxx:xxxxx@example.invalid/repo.git", pred.Metadata.BuildKitMetadata.VCS["source"])
+ require.Equal(t, "123456", pred.Metadata.BuildKitMetadata.VCS["revision"])
+ }
+
+ require.NotEmpty(t, pred.Metadata.BuildInvocationID)
+
+ require.Equal(t, 2, len(pred.Invocation.Parameters.Locals), "%+v", pred.Invocation.Parameters.Locals)
+ require.Equal(t, "context", pred.Invocation.Parameters.Locals[0].Name)
+ require.Equal(t, "dockerfile", pred.Invocation.Parameters.Locals[1].Name)
+
+ require.NotNil(t, pred.Metadata.BuildFinishedOn)
+ require.True(t, time.Since(*pred.Metadata.BuildFinishedOn) < 5*time.Minute)
+ require.NotNil(t, pred.Metadata.BuildStartedOn)
+ require.True(t, time.Since(*pred.Metadata.BuildStartedOn) < 5*time.Minute)
+ require.True(t, pred.Metadata.BuildStartedOn.Before(*pred.Metadata.BuildFinishedOn))
+
+ require.True(t, pred.Metadata.Completeness.Environment)
+ require.Equal(t, platforms.Format(platforms.Normalize(platforms.DefaultSpec())), pred.Invocation.Environment.Platform)
+
+ require.False(t, pred.Metadata.Completeness.Materials)
+ require.False(t, pred.Metadata.Reproducible)
+ require.False(t, pred.Metadata.Hermetic)
+
+ if mode == "max" || mode == "" {
+ require.Equal(t, 2, len(pred.Metadata.BuildKitMetadata.Layers))
+ require.NotNil(t, pred.Metadata.BuildKitMetadata.Source)
+ require.Equal(t, "Dockerfile", pred.Metadata.BuildKitMetadata.Source.Infos[0].Filename)
+ require.Equal(t, dockerfile, pred.Metadata.BuildKitMetadata.Source.Infos[0].Data)
+ require.NotNil(t, pred.BuildConfig)
+
+ require.Equal(t, 3, len(pred.BuildConfig.Definition))
+ } else {
+ require.Equal(t, 0, len(pred.Metadata.BuildKitMetadata.Layers))
+ require.Nil(t, pred.Metadata.BuildKitMetadata.Source)
+ require.Nil(t, pred.BuildConfig)
+ }
+ })
+ }
+}
+
+func testGitProvenanceAttestation(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ dockerfile := []byte(`
+FROM busybox:latest
+RUN --network=none echo "git" > /foo
+COPY myapp.Dockerfile /
+`)
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("myapp.Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ err = runShell(dir,
+ "git init",
+ "git config --local user.email test",
+ "git config --local user.name test",
+ "git add myapp.Dockerfile",
+ "git commit -m initial",
+ "git branch v1",
+ "git update-server-info",
+ )
+ require.NoError(t, err)
+
+ cmd := exec.Command("git", "rev-parse", "v1")
+ cmd.Dir = dir
+ expectedGitSHA, err := cmd.Output()
+ require.NoError(t, err)
+
+ server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(dir))))
+ defer server.Close()
+
+ target := registry + "/buildkit/testwithprovenance:git"
+
+ // inject dummy credentials to test that they are masked
+ expectedURL := strings.Replace(server.URL, "http://", "http://xxxxx:xxxxx@", 1)
+ require.NotEqual(t, expectedURL, server.URL)
+ server.URL = strings.Replace(server.URL, "http://", "http://user:pass@", 1)
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "context": server.URL + "/.git#v1",
+ "attest:provenance": "",
+ "filename": "myapp.Dockerfile",
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
+
+ img := imgs.Find(platforms.Format(platforms.Normalize(platforms.DefaultSpec())))
+ require.NotNil(t, img)
+ require.Equal(t, []byte("git\n"), img.Layers[1]["foo"].Data)
+
+ att := imgs.Find("unknown/unknown")
+ require.NotNil(t, att)
+ require.Equal(t, att.Desc.Annotations["vnd.docker.reference.digest"], string(img.Desc.Digest))
+ require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest")
+ var attest intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const
+
+ type stmtT struct {
+ Predicate provenance.ProvenancePredicate `json:"predicate"`
+ }
+ var stmt stmtT
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt))
+ pred := stmt.Predicate
+
+ _, isClient := f.(*clientFrontend)
+ _, isGateway := f.(*gatewayFrontend)
+
+ if isClient {
+ require.Empty(t, pred.Invocation.Parameters.Frontend)
+ require.Equal(t, "", pred.Invocation.ConfigSource.URI)
+ require.Equal(t, "", pred.Invocation.ConfigSource.EntryPoint)
+ } else {
+ require.NotEmpty(t, pred.Invocation.Parameters.Frontend)
+ require.Equal(t, expectedURL+"/.git#v1", pred.Invocation.ConfigSource.URI)
+ require.Equal(t, "myapp.Dockerfile", pred.Invocation.ConfigSource.EntryPoint)
+ }
+
+ expBase := "pkg:docker/busybox@latest?platform=" + url.PathEscape(platforms.Format(platforms.Normalize(platforms.DefaultSpec())))
+ if isGateway {
+ require.Equal(t, 3, len(pred.Materials), "%+v", pred.Materials)
+
+ require.Contains(t, pred.Materials[0].URI, "pkg:docker/buildkit_test/")
+ require.NotEmpty(t, pred.Materials[0].Digest)
+
+ require.Equal(t, expBase, pred.Materials[1].URI)
+ require.NotEmpty(t, pred.Materials[1].Digest["sha256"])
+
+ require.Equal(t, expectedURL+"/.git#v1", pred.Materials[2].URI)
+ require.Equal(t, strings.TrimSpace(string(expectedGitSHA)), pred.Materials[2].Digest["sha1"])
+ } else {
+ require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials)
+
+ require.Equal(t, expBase, pred.Materials[0].URI)
+ require.NotEmpty(t, pred.Materials[0].Digest["sha256"])
+
+ require.Equal(t, expectedURL+"/.git#v1", pred.Materials[1].URI)
+ require.Equal(t, strings.TrimSpace(string(expectedGitSHA)), pred.Materials[1].Digest["sha1"])
+ }
+
+ require.Equal(t, 0, len(pred.Invocation.Parameters.Locals))
+
+ require.True(t, pred.Metadata.Completeness.Materials)
+ require.True(t, pred.Metadata.Completeness.Environment)
+ require.True(t, pred.Metadata.Hermetic)
+
+ if isClient {
+ require.False(t, pred.Metadata.Completeness.Parameters)
+ } else {
+ require.True(t, pred.Metadata.Completeness.Parameters)
+ }
+ require.False(t, pred.Metadata.Reproducible)
+
+ require.Equal(t, 0, len(pred.Metadata.BuildKitMetadata.VCS), "%+v", pred.Metadata.BuildKitMetadata.VCS)
+}
+
+func testMultiPlatformProvenance(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureMultiPlatform, integration.FeatureProvenance)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ dockerfile := []byte(`
+FROM busybox:latest
+ARG TARGETARCH
+RUN echo "ok-$TARGETARCH" > /foo
+`)
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ target := registry + "/buildkit/testmultiprovenance:latest"
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ FrontendAttrs: map[string]string{
+ "attest:provenance": "mode=max",
+ "build-arg:FOO": "bar",
+ "label:lbl": "abc",
+ "platform": "linux/amd64,linux/arm64",
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 4, len(imgs.Images))
+
+ _, isClient := f.(*clientFrontend)
+ _, isGateway := f.(*gatewayFrontend)
+
+ for _, p := range []string{"linux/amd64", "linux/arm64"} {
+ img := imgs.Find(p)
+ require.NotNil(t, img)
+ if p == "linux/amd64" {
+ require.Equal(t, []byte("ok-amd64\n"), img.Layers[1]["foo"].Data)
+ } else {
+ require.Equal(t, []byte("ok-arm64\n"), img.Layers[1]["foo"].Data)
+ }
+
+ att := imgs.FindAttestation(p)
+ require.NotNil(t, att)
+ require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest")
+ var attest intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const
+
+ type stmtT struct {
+ Predicate provenance.ProvenancePredicate `json:"predicate"`
+ }
+ var stmt stmtT
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt))
+ pred := stmt.Predicate
+
+ require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType)
+ require.Equal(t, "", pred.Builder.ID)
+ require.Equal(t, "", pred.Invocation.ConfigSource.URI)
+
+ if isGateway {
+ require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials)
+ require.Contains(t, pred.Materials[0].URI, "buildkit_test")
+ require.Contains(t, pred.Materials[1].URI, "pkg:docker/busybox@latest")
+ require.Contains(t, pred.Materials[1].URI, url.PathEscape(p))
+ } else {
+ require.Equal(t, 1, len(pred.Materials), "%+v", pred.Materials)
+ require.Contains(t, pred.Materials[0].URI, "pkg:docker/busybox@latest")
+ require.Contains(t, pred.Materials[0].URI, url.PathEscape(p))
+ }
+
+ args := pred.Invocation.Parameters.Args
+ if isClient {
+ require.Equal(t, 0, len(args), "%+v", args)
+ } else if isGateway {
+ require.Equal(t, 3, len(args), "%+v", args)
+ require.Equal(t, "bar", args["build-arg:FOO"])
+ require.Equal(t, "abc", args["label:lbl"])
+ require.Contains(t, args["source"], "buildkit_test/")
+ } else {
+ require.Equal(t, 2, len(args), "%+v", args)
+ require.Equal(t, "bar", args["build-arg:FOO"])
+ require.Equal(t, "abc", args["label:lbl"])
+ }
+ }
+}
+
+func testClientFrontendProvenance(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance)
+ // Building with the client frontend does not capture frontend provenance,
+ // because the frontend runs in the client rather than in BuildKit.
+ // This test builds a Dockerfile inside a client frontend to check that
+ // frontend provenance is captured in that case.
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ target := registry + "/buildkit/clientprovenance:latest"
+
+ f := getFrontend(t, sb)
+
+ _, isClient := f.(*clientFrontend)
+ if !isClient {
+ t.Skip("not a client frontend")
+ }
+
+ dockerfile := []byte(`
+ FROM alpine as x86target
+ RUN echo "alpine" > /foo
+
+ FROM busybox:latest AS armtarget
+ RUN --network=none echo "bbox" > /foo
+ `)
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ st := llb.HTTP("https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md")
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // This does not show up in provenance
+ res0, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ dt, err := res0.Ref.ReadFile(ctx, gateway.ReadRequest{
+ Filename: "README.md",
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ res1, err := c.Solve(ctx, gateway.SolveRequest{
+ Frontend: "dockerfile.v0",
+ FrontendOpt: map[string]string{
+ "build-arg:FOO": string(dt[:3]),
+ "target": "armtarget",
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ res2, err := c.Solve(ctx, gateway.SolveRequest{
+ Frontend: "dockerfile.v0",
+ FrontendOpt: map[string]string{
+ "build-arg:FOO": string(dt[4:8]),
+ "target": "x86target",
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ res := gateway.NewResult()
+ res.AddRef("linux/arm64", res1.Ref)
+ res.AddRef("linux/amd64", res2.Ref)
+
+ pl, err := json.Marshal(exptypes.Platforms{
+ Platforms: []exptypes.Platform{
+ {
+ ID: "linux/arm64",
+ Platform: ocispecs.Platform{OS: "linux", Architecture: "arm64"},
+ },
+ {
+ ID: "linux/amd64",
+ Platform: ocispecs.Platform{OS: "linux", Architecture: "amd64"},
+ },
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ res.AddMeta(exptypes.ExporterPlatformsKey, pl)
+
+ return res, nil
+ }
+
+ _, err = c.Build(sb.Context(), client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "attest:provenance": "mode=full",
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 4, len(imgs.Images))
+
+ img := imgs.Find("linux/arm64")
+ require.NotNil(t, img)
+ require.Equal(t, []byte("bbox\n"), img.Layers[1]["foo"].Data)
+
+ att := imgs.FindAttestation("linux/arm64")
+ require.NotNil(t, att)
+ require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest")
+ var attest intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const
+
+ type stmtT struct {
+ Predicate provenance.ProvenancePredicate `json:"predicate"`
+ }
+ var stmt stmtT
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt))
+ pred := stmt.Predicate
+
+ require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType)
+ require.Equal(t, "", pred.Builder.ID)
+ require.Equal(t, "", pred.Invocation.ConfigSource.URI)
+
+ args := pred.Invocation.Parameters.Args
+ require.Equal(t, 2, len(args), "%+v", args)
+ require.Equal(t, "The", args["build-arg:FOO"])
+ require.Equal(t, "armtarget", args["target"])
+
+ require.Equal(t, 2, len(pred.Invocation.Parameters.Locals))
+ require.Equal(t, 1, len(pred.Materials))
+ require.Contains(t, pred.Materials[0].URI, "docker/busybox")
+
+ // amd64
+ img = imgs.Find("linux/amd64")
+ require.NotNil(t, img)
+ require.Equal(t, []byte("alpine\n"), img.Layers[1]["foo"].Data)
+
+ att = imgs.FindAttestation("linux/amd64")
+ require.NotNil(t, att)
+ require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest")
+ attest = intoto.Statement{}
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const
+
+ stmt = stmtT{}
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt))
+ pred = stmt.Predicate
+
+ require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType)
+ require.Equal(t, "", pred.Builder.ID)
+ require.Equal(t, "", pred.Invocation.ConfigSource.URI)
+
+ args = pred.Invocation.Parameters.Args
+ require.Equal(t, 2, len(args), "%+v", args)
+ require.Equal(t, "Moby", args["build-arg:FOO"])
+ require.Equal(t, "x86target", args["target"])
+
+ require.Equal(t, 2, len(pred.Invocation.Parameters.Locals))
+ require.Equal(t, 1, len(pred.Materials))
+ require.Contains(t, pred.Materials[0].URI, "docker/alpine")
+}
+
+func testClientLLBProvenance(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ target := registry + "/buildkit/clientprovenance:llb"
+
+ f := getFrontend(t, sb)
+
+ _, isClient := f.(*clientFrontend)
+ if !isClient {
+ t.Skip("not a client frontend")
+ }
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ st := llb.HTTP("https://raw.githubusercontent.com/moby/moby/v20.10.21/README.md")
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ // this also shows up in the provenance
+ res0, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ dt, err := res0.Ref.ReadFile(ctx, gateway.ReadRequest{
+ Filename: "README.md",
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ st = llb.Image("alpine").File(llb.Mkfile("/foo", 0600, dt))
+ def, err = st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ res1, err := c.Solve(ctx, gateway.SolveRequest{
+ Definition: def.ToPB(),
+ })
+ if err != nil {
+ return nil, err
+ }
+ return res1, nil
+ }
+
+ _, err = c.Build(sb.Context(), client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "attest:provenance": "mode=full",
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ LocalDirs: map[string]string{},
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
+
+ nativePlatform := platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
+
+ img := imgs.Find(nativePlatform)
+ require.NotNil(t, img)
+ require.Contains(t, string(img.Layers[1]["foo"].Data), "The Moby Project")
+
+ att := imgs.FindAttestation(nativePlatform)
+ require.NotNil(t, att)
+ require.Equal(t, att.Desc.Annotations["vnd.docker.reference.type"], "attestation-manifest")
+ var attest intoto.Statement
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
+ require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
+ require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType) // intentionally not const
+
+ type stmtT struct {
+ Predicate provenance.ProvenancePredicate `json:"predicate"`
+ }
+ var stmt stmtT
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt))
+ pred := stmt.Predicate
+
+ require.Equal(t, "https://mobyproject.org/buildkit@v1", pred.BuildType)
+ require.Equal(t, "", pred.Builder.ID)
+ require.Equal(t, "", pred.Invocation.ConfigSource.URI)
+
+ args := pred.Invocation.Parameters.Args
+ require.Equal(t, 0, len(args), "%+v", args)
+ require.Equal(t, 0, len(pred.Invocation.Parameters.Locals))
+
+ require.Equal(t, 2, len(pred.Materials), "%+v", pred.Materials)
+ require.Contains(t, pred.Materials[0].URI, "docker/alpine")
+ require.Contains(t, pred.Materials[1].URI, "README.md")
+}
+
+func testSecretSSHProvenance(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureProvenance)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ dockerfile := []byte(`
+FROM busybox:latest
+RUN --mount=type=secret,id=mysecret --mount=type=secret,id=othersecret --mount=type=ssh echo "ok" > /foo
+`)
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ target := registry + "/buildkit/testsecretprovenance:latest"
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ FrontendAttrs: map[string]string{
+ "attest:provenance": "mode=max",
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
+ require.NoError(t, err)
+ require.Equal(t, 2, len(imgs.Images))
+
+ expPlatform := platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
+
+ img := imgs.Find(expPlatform)
+ require.NotNil(t, img)
+ require.Equal(t, []byte("ok\n"), img.Layers[1]["foo"].Data)
+
+ att := imgs.FindAttestation(expPlatform)
+ type stmtT struct {
+ Predicate provenance.ProvenancePredicate `json:"predicate"`
+ }
+ var stmt stmtT
+ require.NoError(t, json.Unmarshal(att.LayersRaw[0], &stmt))
+ pred := stmt.Predicate
+
+ require.Equal(t, 2, len(pred.Invocation.Parameters.Secrets), "%+v", pred.Invocation.Parameters.Secrets)
+ require.Equal(t, "mysecret", pred.Invocation.Parameters.Secrets[0].ID)
+ require.True(t, pred.Invocation.Parameters.Secrets[0].Optional)
+ require.Equal(t, "othersecret", pred.Invocation.Parameters.Secrets[1].ID)
+ require.True(t, pred.Invocation.Parameters.Secrets[1].Optional)
+
+ require.Equal(t, 1, len(pred.Invocation.Parameters.SSH), "%+v", pred.Invocation.Parameters.SSH)
+ require.Equal(t, "default", pred.Invocation.Parameters.SSH[0].ID)
+ require.True(t, pred.Invocation.Parameters.SSH[0].Optional)
+}
+
+func testNilProvenance(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureProvenance)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ f := getFrontend(t, sb)
+
+ dockerfile := []byte(`
+FROM scratch
+ENV FOO=bar
+`)
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ FrontendAttrs: map[string]string{
+ "attest:provenance": "mode=max",
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+}
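
Every provenance test in this file decodes the attestation the same way: the pushed index carries an extra `unknown/unknown` manifest whose `vnd.docker.reference.digest` and `vnd.docker.reference.type` annotations tie it to the image, and its first raw layer is an in-toto Statement carrying the SLSA v0.2 predicate. The inline steps repeated above factor naturally into a small helper; a sketch, reusing only packages the test file already imports (the helper name is illustrative):

```go
// Sketch of the attestation decoding the tests above repeat inline:
// unmarshal the raw attestation layer twice, once for the in-toto header
// and once for the typed SLSA predicate.
package dockerfile

import (
	"encoding/json"
	"testing"

	intoto "github.com/in-toto/in-toto-golang/in_toto"
	"github.com/moby/buildkit/solver/llbsolver/provenance"
	"github.com/stretchr/testify/require"
)

func decodeProvenance(t *testing.T, raw []byte) provenance.ProvenancePredicate {
	var attest intoto.Statement
	require.NoError(t, json.Unmarshal(raw, &attest))
	require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
	require.Equal(t, "https://slsa.dev/provenance/v0.2", attest.PredicateType)

	var stmt struct {
		Predicate provenance.ProvenancePredicate `json:"predicate"`
	}
	require.NoError(t, json.Unmarshal(raw, &stmt))
	return stmt.Predicate
}

// usage in the tests above: pred := decodeProvenance(t, att.LayersRaw[0])
```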
diff --git a/frontend/dockerfile/dockerfile_runnetwork_test.go b/frontend/dockerfile/dockerfile_runnetwork_test.go
index 8244d48f5716..2d02110e383c 100644
--- a/frontend/dockerfile/dockerfile_runnetwork_test.go
+++ b/frontend/dockerfile/dockerfile_runnetwork_test.go
@@ -41,11 +41,11 @@ FROM busybox
RUN ip link show eth0
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -77,11 +77,11 @@ RUN --network=none ! ip link show eth0
dockerfile += "RUN ip link show eth0"
}
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -118,11 +118,11 @@ RUN --network=host nc 127.0.0.1 %s | grep foo
dockerfile += fmt.Sprintf(`RUN ! nc 127.0.0.1 %s | grep foo`, port)
}
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -141,8 +141,12 @@ RUN --network=host nc 127.0.0.1 %s | grep foo
case networkHostGranted:
require.NoError(t, err)
case networkHostDenied:
- require.Error(t, err)
- require.Contains(t, err.Error(), "entitlement network.host is not allowed")
+ if !integration.IsTestDockerd() {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "entitlement network.host is not allowed")
+ } else {
+ require.NoError(t, err)
+ }
default:
require.Fail(t, "unexpected network.host mode %q", hostAllowed)
}
@@ -162,11 +166,11 @@ RUN nc 127.0.0.1 %s | grep foo
RUN --network=none ! nc -z 127.0.0.1 %s
`, port, port)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -188,8 +192,12 @@ RUN --network=none ! nc -z 127.0.0.1 %s
case networkHostGranted:
require.NoError(t, err)
case networkHostDenied:
- require.Error(t, err)
- require.Contains(t, err.Error(), "entitlement network.host is not allowed")
+ if !integration.IsTestDockerd() {
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "entitlement network.host is not allowed")
+ } else {
+ require.NoError(t, err)
+ }
default:
require.Fail(t, "unexpected network.host mode %q", hostAllowed)
}
diff --git a/frontend/dockerfile/dockerfile_runsecurity_test.go b/frontend/dockerfile/dockerfile_runsecurity_test.go
index 4726182a9d71..99e3b3c49d38 100644
--- a/frontend/dockerfile/dockerfile_runsecurity_test.go
+++ b/frontend/dockerfile/dockerfile_runsecurity_test.go
@@ -4,7 +4,6 @@
package dockerfile
import (
- "os"
"testing"
"github.com/containerd/continuity/fs/fstest"
@@ -58,11 +57,11 @@ RUN --security=insecure ls -l /dev && dd if=/dev/zero of=disk.img bs=20M count=1
rm disk.img
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -97,11 +96,11 @@ RUN --security=insecure [ "$(printf '%x' $(( $(cat /proc/self/status | grep CapB
RUN [ "$(cat /proc/self/status | grep CapBnd)" == "CapBnd: 00000000a80425fb" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -135,11 +134,11 @@ FROM busybox
RUN --security=sandbox [ "$(cat /proc/self/status | grep CapBnd)" == "CapBnd: 00000000a80425fb" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -163,11 +162,11 @@ FROM busybox
RUN [ "$(cat /proc/self/status | grep CapBnd)" == "CapBnd: 00000000a80425fb" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
diff --git a/frontend/dockerfile/dockerfile_secrets_test.go b/frontend/dockerfile/dockerfile_secrets_test.go
index ae00fac07df6..984bfacfe487 100644
--- a/frontend/dockerfile/dockerfile_secrets_test.go
+++ b/frontend/dockerfile/dockerfile_secrets_test.go
@@ -1,7 +1,6 @@
package dockerfile
import (
- "os"
"testing"
"github.com/containerd/continuity/fs/fstest"
@@ -31,11 +30,11 @@ RUN --mount=type=secret,required=false,mode=741,uid=100,gid=102,target=/mysecret
RUN [ ! -f /mysecret ] # check no stub left behind
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -61,11 +60,11 @@ FROM busybox
RUN --mount=type=secret,required,id=mysecret foo
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
diff --git a/frontend/dockerfile/dockerfile_ssh_test.go b/frontend/dockerfile/dockerfile_ssh_test.go
index 9515aad12882..0714578e1bcb 100644
--- a/frontend/dockerfile/dockerfile_ssh_test.go
+++ b/frontend/dockerfile/dockerfile_ssh_test.go
@@ -5,7 +5,6 @@ import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -35,17 +34,17 @@ FROM busybox
RUN --mount=type=ssh,mode=741,uid=100,gid=102 [ "$(stat -c "%u %g %f" $SSH_AUTH_SOCK)" = "100 102 c1e1" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- k, err := rsa.GenerateKey(rand.Reader, 1024)
+ k, err := rsa.GenerateKey(rand.Reader, 2048)
require.NoError(t, err)
dt := pem.EncodeToMemory(
@@ -55,11 +54,9 @@ RUN --mount=type=ssh,mode=741,uid=100,gid=102 [ "$(stat -c "%u %g %f" $SSH_AUTH_
},
)
- tmpDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(tmpDir)
+ tmpDir := t.TempDir()
- err = ioutil.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600)
+ err = os.WriteFile(filepath.Join(tmpDir, "key"), dt, 0600)
require.NoError(t, err)
ssh, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{
diff --git a/frontend/dockerfile/dockerfile_targets_test.go b/frontend/dockerfile/dockerfile_targets_test.go
new file mode 100644
index 000000000000..43e473c40da7
--- /dev/null
+++ b/frontend/dockerfile/dockerfile_targets_test.go
@@ -0,0 +1,193 @@
+package dockerfile
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "testing"
+
+ "github.com/containerd/continuity/fs/fstest"
+ "github.com/moby/buildkit/client"
+ "github.com/moby/buildkit/frontend/dockerfile/builder"
+ gateway "github.com/moby/buildkit/frontend/gateway/client"
+ "github.com/moby/buildkit/frontend/subrequests"
+ "github.com/moby/buildkit/frontend/subrequests/targets"
+ "github.com/moby/buildkit/util/testutil/integration"
+ "github.com/pkg/errors"
+ "github.com/stretchr/testify/require"
+)
+
+var targetsTests = integration.TestFuncs(
+ testTargetsList,
+ testTargetsDescribeDefinition,
+)
+
+func testTargetsList(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendTargets)
+ f := getFrontend(t, sb)
+ if _, ok := f.(*clientFrontend); !ok {
+ t.Skip("only test with client frontend")
+ }
+
+ dockerfile := []byte(`
+# build defines stage for compiling the binary
+FROM alpine AS build
+RUN true
+
+FROM busybox as second
+RUN false
+
+FROM alpine
+RUN false
+
+# binary returns the compiled binary
+FROM second AS binary
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", []byte(dockerfile), 0600),
+ )
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ destDir, err := os.MkdirTemp("", "buildkit")
+ require.NoError(t, err)
+ defer os.RemoveAll(destDir)
+
+ called := false
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res, err := c.Solve(ctx, gateway.SolveRequest{
+ FrontendOpt: map[string]string{
+ "frontend.caps": "moby.buildkit.frontend.subrequests",
+ "requestid": "frontend.targets",
+ },
+ Frontend: "dockerfile.v0",
+ })
+ require.NoError(t, err)
+
+ list, err := unmarshalTargets(res)
+ require.NoError(t, err)
+
+ require.Equal(t, 1, len(list.Sources))
+ require.Equal(t, dockerfile, list.Sources[0])
+
+ require.Equal(t, 4, len(list.Targets))
+
+ target := list.Targets[0]
+ require.Equal(t, "build", target.Name)
+ require.Equal(t, "alpine", target.Base)
+ require.Equal(t, "defines stage for compiling the binary", target.Description)
+ require.Equal(t, false, target.Default)
+ require.Equal(t, int32(0), target.Location.SourceIndex)
+ require.Equal(t, int32(3), target.Location.Ranges[0].Start.Line)
+
+ target = list.Targets[1]
+ require.Equal(t, "second", target.Name)
+ require.Equal(t, "", target.Description)
+ require.Equal(t, "busybox", target.Base)
+ require.Equal(t, false, target.Default)
+ require.Equal(t, int32(0), target.Location.SourceIndex)
+ require.Equal(t, int32(6), target.Location.Ranges[0].Start.Line)
+
+ target = list.Targets[2]
+ require.Equal(t, "", target.Name)
+ require.Equal(t, "", target.Description)
+ require.Equal(t, "alpine", target.Base)
+ require.Equal(t, false, target.Default)
+ require.Equal(t, int32(0), target.Location.SourceIndex)
+ require.Equal(t, int32(9), target.Location.Ranges[0].Start.Line)
+
+ target = list.Targets[3]
+ require.Equal(t, "binary", target.Name)
+ require.Equal(t, "returns the compiled binary", target.Description)
+ require.Equal(t, true, target.Default)
+ require.Equal(t, int32(0), target.Location.SourceIndex)
+ require.Equal(t, int32(13), target.Location.Ranges[0].Start.Line)
+
+ called = true
+ return nil, nil
+ }
+
+ _, err = c.Build(sb.Context(), client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ require.True(t, called)
+}
+
+func testTargetsDescribeDefinition(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureFrontendTargets)
+ f := getFrontend(t, sb)
+ if _, ok := f.(*clientFrontend); !ok {
+ t.Skip("only test with client frontend")
+ }
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ dockerfile := []byte(`
+FROM scratch
+COPY Dockerfile Dockerfile
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ called := false
+
+ frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ reqs, err := subrequests.Describe(ctx, c)
+ require.NoError(t, err)
+
+ require.True(t, len(reqs) > 0)
+
+ hasTargets := false
+
+ for _, req := range reqs {
+ if req.Name != "frontend.targets" {
+ continue
+ }
+ hasTargets = true
+ require.Equal(t, subrequests.RequestType("rpc"), req.Type)
+ require.NotEqual(t, req.Version, "")
+ }
+ require.True(t, hasTargets)
+
+ called = true
+ return nil, nil
+ }
+
+ _, err = c.Build(sb.Context(), client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ },
+ }, "", frontend, nil)
+ require.NoError(t, err)
+
+ require.True(t, called)
+}
+
+func unmarshalTargets(res *gateway.Result) (*targets.List, error) {
+ dt, ok := res.Metadata["result.json"]
+ if !ok {
+		return nil, errors.Errorf("missing targets result")
+ }
+ var l targets.List
+ if err := json.Unmarshal(dt, &l); err != nil {
+ return nil, err
+ }
+ return &l, nil
+}
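The new test file above exercises BuildKit's `frontend.targets` subrequest end to end. Distilled from that test code, here is a minimal sketch of the client-side pattern it relies on; it is illustrative only (the function name `listBuildTargets` and the standalone package are not part of the patch), assuming the same frontend options and `result.json` metadata key used by `testTargetsList` and `unmarshalTargets`.

```go
package targetsexample

import (
	"context"
	"encoding/json"

	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/frontend/subrequests/targets"
	"github.com/pkg/errors"
)

// listBuildTargets mirrors the frontend callback in testTargetsList: it asks
// the dockerfile frontend to enumerate its stages via the frontend.targets
// subrequest and decodes the result.json metadata into a targets.List.
func listBuildTargets(ctx context.Context, c gateway.Client) (*targets.List, error) {
	res, err := c.Solve(ctx, gateway.SolveRequest{
		Frontend: "dockerfile.v0",
		FrontendOpt: map[string]string{
			"frontend.caps": "moby.buildkit.frontend.subrequests",
			"requestid":     "frontend.targets",
		},
	})
	if err != nil {
		return nil, err
	}
	dt, ok := res.Metadata["result.json"]
	if !ok {
		return nil, errors.Errorf("missing targets result")
	}
	var l targets.List
	if err := json.Unmarshal(dt, &l); err != nil {
		return nil, err
	}
	return &l, nil
}
```

`testTargetsList` then asserts on the decoded fields (`Name`, `Base`, `Description`, `Default`, and `Location`) for each stage of the Dockerfile.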
diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go
index a4ca3578c7dd..ed49f9c8502a 100644
--- a/frontend/dockerfile/dockerfile_test.go
+++ b/frontend/dockerfile/dockerfile_test.go
@@ -8,29 +8,30 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/http/httptest"
"os"
"os/exec"
+ "path"
"path/filepath"
"runtime"
"sort"
- "strconv"
"strings"
"testing"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/snapshots"
"github.com/containerd/continuity/fs/fstest"
+ intoto "github.com/in-toto/in-toto-golang/in_toto"
+ controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/frontend/dockerfile/builder"
- "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/frontend/subrequests"
"github.com/moby/buildkit/identity"
@@ -39,16 +40,18 @@ import (
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/contentutil"
+ "github.com/moby/buildkit/util/iohelper"
"github.com/moby/buildkit/util/testutil"
"github.com/moby/buildkit/util/testutil/httpserver"
"github.com/moby/buildkit/util/testutil/integration"
+ digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
func init() {
- if os.Getenv("TEST_DOCKERD") == "1" {
+ if integration.IsTestDockerd() {
integration.InitDockerdWorker()
} else {
integration.InitOCIWorker()
@@ -67,6 +70,7 @@ var allTests = integration.TestFuncs(
testExportedHistory,
testExposeExpansion,
testUser,
+ testUserAdditionalGids,
testCacheReleased,
testDockerignore,
testDockerignoreInvalid,
@@ -89,7 +93,6 @@ var allTests = integration.TestFuncs(
testQuotedMetaArgs,
testIgnoreEntrypoint,
testSymlinkedDockerfile,
- testDockerfileAddArchiveWildcard,
testEmptyWildcard,
testWorkdirCreatesDir,
testDockerfileAddArchiveWildcard,
@@ -116,12 +119,14 @@ var allTests = integration.TestFuncs(
testUlimit,
testCgroupParent,
testNamedImageContext,
+ testNamedImageContextPlatform,
+ testNamedImageContextTimestamps,
+ testNamedImageContextScratch,
testNamedLocalContext,
+ testNamedOCILayoutContext,
+ testNamedOCILayoutContextExport,
testNamedInputContext,
testNamedMultiplatformInputContext,
-)
-
-var fileOpTests = integration.TestFuncs(
testEmptyDestDir,
testCopyChownCreateDest,
testCopyThroughSymlinkContext,
@@ -144,6 +149,18 @@ var fileOpTests = integration.TestFuncs(
testWorkdirCopyIgnoreRelative,
testCopyFollowAllSymlinks,
testDockerfileAddChownExpand,
+ testSourceDateEpochWithoutExporter,
+ testSBOMScannerImage,
+ testProvenanceAttestation,
+ testGitProvenanceAttestation,
+ testMultiPlatformProvenance,
+ testClientFrontendProvenance,
+ testClientLLBProvenance,
+ testSecretSSHProvenance,
+ testNilProvenance,
+ testSBOMScannerArgs,
+ testMultiPlatformWarnings,
+ testNilContextInSolveGateway,
)
// Tests that depend on the `security.*` entitlements
@@ -155,6 +172,11 @@ var networkTests = []integration.Test{}
// Tests that depend on heredoc support
var heredocTests = []integration.Test{}
+// Tests that depend on reproducible env
+var reproTests = integration.TestFuncs(
+ testReproSourceDateEpoch,
+)
+
var opts []integration.TestOpt
var securityOpts []integration.TestOpt
@@ -170,9 +192,6 @@ func init() {
opts = []integration.TestOpt{
integration.WithMirroredImages(integration.OfficialImages("busybox:latest")),
- integration.WithMirroredImages(map[string]string{
- "docker/dockerfile-copy:v0.1.9": "docker.io/" + dockerfile2llb.DefaultCopyImage,
- }),
integration.WithMatrix("frontend", frontends),
}
@@ -194,10 +213,6 @@ func init() {
func TestIntegration(t *testing.T) {
integration.Run(t, allTests, opts...)
- integration.Run(t, fileOpTests, append(opts, integration.WithMatrix("fileop", map[string]interface{}{
- "true": true,
- "false": false,
- }))...)
integration.Run(t, securityTests, append(append(opts, securityOpts...),
integration.WithMatrix("security.insecure", map[string]interface{}{
"granted": securityInsecureGranted,
@@ -209,6 +224,12 @@ func TestIntegration(t *testing.T) {
"denied": networkHostDenied,
}))...)
integration.Run(t, heredocTests, opts...)
+ integration.Run(t, outlineTests, opts...)
+ integration.Run(t, targetsTests, opts...)
+
+ integration.Run(t, reproTests, append(opts,
+		// Only use the amd64 digest, regardless of the host platform
+ integration.WithMirroredImages(integration.OfficialImages("debian:bullseye-20230109-slim@sha256:1acb06a0c31fb467eb8327ad361f1091ab265e0bf26d452dea45dcb0c0ea5e75")))...)
}
func testDefaultEnvWithArgs(t *testing.T, sb integration.Sandbox) {
@@ -229,20 +250,18 @@ COPY --from=build /out /out
echo -n $my_arg $1 > /out
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("myscript.sh", script, 0700),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
for _, x := range []struct {
name string
@@ -269,7 +288,7 @@ echo -n $my_arg $1 > /out
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Equal(t, x.expected, string(dt))
})
@@ -285,19 +304,17 @@ ENV myenv foo%sbar
RUN [ "$myenv" = 'foo%sbar' ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -336,7 +353,8 @@ RUN [ ! -f foo ] && [ -f bar ]
foo
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("Dockerfile.dockerignore", ignore, 0600),
fstest.CreateFile("Dockerfile2", dockerfile2, 0600),
@@ -345,7 +363,6 @@ foo
fstest.CreateFile("bar", []byte("contents0"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -373,7 +390,6 @@ foo
func testEmptyDestDir(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox
@@ -382,21 +398,18 @@ COPY testfile $empty
RUN [ "$(cat testfile)" == "contents0" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("testfile", []byte("contents0"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -406,6 +419,7 @@ RUN [ "$(cat testfile)" == "contents0" ]
}
func testExportCacheLoop(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
f := getFrontend(t, sb)
dockerfile := []byte(`
@@ -424,16 +438,14 @@ FROM scratch
COPY --from=base2 /foo /f
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("hello.txt", []byte("hello"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- cacheDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(cacheDir)
+ cacheDir := t.TempDir()
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -497,13 +509,13 @@ COPY bar fordarwin
FROM stage-$TARGETOS
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("data"), 0600),
fstest.CreateFile("bar", []byte("data2"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -571,19 +583,17 @@ WORKDIR /foo
WORKDIR /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -611,11 +621,11 @@ func testCacheReleased(t *testing.T, sb integration.Sandbox) {
FROM busybox
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -648,12 +658,12 @@ FROM scratch
ENV foo bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile.web", dockerfile, 0600),
fstest.Symlink("Dockerfile.web", "Dockerfile"),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -731,12 +741,12 @@ RUN e="300:400"; p="/file" ; a=` + "`" + `stat -c "%u:%g
&& e="300:400"; p="/existingdir/subdir/nestedfile"; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile.web", dockerfile, 0600),
fstest.Symlink("Dockerfile.web", "Dockerfile"),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -774,22 +784,20 @@ FROM scratch
COPY --from=base unique /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo1", []byte("foo1-data"), 0600),
fstest.CreateFile("foo2", []byte("foo2-data"), 0600),
fstest.CreateFile("bar", []byte("bar-data"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -805,15 +813,13 @@ COPY --from=base unique /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
- err = ioutil.WriteFile(filepath.Join(dir, "bar"), []byte("bar-data-mod"), 0600)
+ err = os.WriteFile(filepath.Join(dir, "bar"), []byte("bar-data-mod"), 0600)
require.NoError(t, err)
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -829,16 +835,14 @@ COPY --from=base unique /
}, nil)
require.NoError(t, err)
- dt2, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ dt2, err := os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
require.Equal(t, string(dt), string(dt2))
- err = ioutil.WriteFile(filepath.Join(dir, "foo2"), []byte("foo2-data-mod"), 0600)
+ err = os.WriteFile(filepath.Join(dir, "foo2"), []byte("foo2-data-mod"), 0600)
require.NoError(t, err)
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -854,7 +858,7 @@ COPY --from=base unique /
}, nil)
require.NoError(t, err)
- dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ dt2, err = os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
require.NotEqual(t, string(dt), string(dt2))
}
@@ -867,20 +871,18 @@ FROM scratch
COPY foo nomatch* /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("contents0"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -896,14 +898,13 @@ COPY foo nomatch* /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "contents0", string(dt))
}
func testWorkdirUser(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox
@@ -913,20 +914,17 @@ WORKDIR /mydir
RUN [ "$(stat -c "%U %G" /mydir)" == "user user" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -937,7 +935,6 @@ RUN [ "$(stat -c "%U %G" /mydir)" == "user user" ]
func testWorkdirCopyIgnoreRelative(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch AS base
@@ -948,20 +945,17 @@ FROM scratch
COPY --from=base Dockerfile .
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -972,7 +966,6 @@ COPY --from=base Dockerfile .
func testWorkdirExists(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox
@@ -982,20 +975,17 @@ WORKDIR /mydir
RUN [ "$(stat -c "%U %G" /mydir)" == "user user" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1006,7 +996,6 @@ RUN [ "$(stat -c "%U %G" /mydir)" == "user user" ]
func testCopyChownCreateDest(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox
@@ -1020,11 +1009,11 @@ RUN [ "$(stat -c "%U %G" /dest)" == "user user" ]
RUN [ "$(stat -c "%U %G" /dest01)" == "user01 user" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -1032,8 +1021,7 @@ RUN [ "$(stat -c "%U %G" /dest01)" == "user01 user" ]
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- "build-arg:group": "user",
+ "build-arg:group": "user",
},
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
@@ -1045,29 +1033,26 @@ RUN [ "$(stat -c "%U %G" /dest01)" == "user01 user" ]
func testCopyThroughSymlinkContext(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch
COPY link/foo .
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.Symlink("sub", "link"),
fstest.CreateDir("sub", 0700),
fstest.CreateFile("sub/foo", []byte(`contents`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -1076,9 +1061,6 @@ COPY link/foo .
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1086,14 +1068,13 @@ COPY link/foo .
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "contents", string(dt))
}
func testCopyThroughSymlinkMultiStage(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox AS build
@@ -1103,19 +1084,17 @@ COPY --from=build /sub/foo .
COPY --from=build /sub2/foo bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -1124,9 +1103,6 @@ COPY --from=build /sub2/foo bar
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1134,34 +1110,31 @@ COPY --from=build /sub2/foo bar
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "data", string(dt))
}
func testCopySocket(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch
COPY . /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateSocket("socket.sock", 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -1170,9 +1143,6 @@ COPY . /
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1195,11 +1165,11 @@ ENTRYPOINT ["/nosuchcmd"]
RUN ["ls"]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -1228,19 +1198,17 @@ FROM scratch
COPY --from=build /out .
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
LocalDirs: map[string]string{
@@ -1256,7 +1224,7 @@ COPY --from=build /out .
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Equal(t, "bar-box-foo", string(dt))
}
@@ -1274,19 +1242,17 @@ FROM scratch
COPY --from=build /out .
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
LocalDirs: map[string]string{
@@ -1302,13 +1268,13 @@ COPY --from=build /out .
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Equal(t, "foo bar:box-foo:123 456", string(dt))
}
func testDefaultShellAndPath(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter)
f := getFrontend(t, sb)
dockerfile := []byte(`
@@ -1317,19 +1283,17 @@ ENTRYPOINT foo bar
COPY Dockerfile .
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
out := filepath.Join(destDir, "out.tar")
outW, err := os.Create(out)
@@ -1352,7 +1316,7 @@ COPY Dockerfile .
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out.tar"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "out.tar"))
require.NoError(t, err)
m, err := testutil.ReadTarToMap(dt, false)
@@ -1398,7 +1362,7 @@ COPY Dockerfile .
}
func testExportMultiPlatform(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureMultiPlatform)
f := getFrontend(t, sb)
dockerfile := []byte(`
@@ -1409,7 +1373,8 @@ LABEL target=$TARGETPLATFORM
COPY arch-$TARGETARCH whoami
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("arch-arm", []byte(`i am arm`), 0600),
fstest.CreateFile("arch-amd64", []byte(`i am amd64`), 0600),
@@ -1417,15 +1382,12 @@ COPY arch-$TARGETARCH whoami
fstest.CreateFile("arch-ppc64le", []byte(`i am ppc64le`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
LocalDirs: map[string]string{
@@ -1444,23 +1406,21 @@ COPY arch-$TARGETARCH whoami
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "windows_amd64/whoami"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "windows_amd64/whoami"))
require.NoError(t, err)
require.Equal(t, "i am amd64", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "linux_arm_v7/whoami"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "linux_arm_v7/whoami"))
require.NoError(t, err)
require.Equal(t, "i am arm", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "linux_s390x/whoami"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "linux_s390x/whoami"))
require.NoError(t, err)
require.Equal(t, "i am s390x", string(dt))
// repeat with oci exporter
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
out := filepath.Join(destDir, "out.tar")
outW, err := os.Create(out)
@@ -1483,7 +1443,7 @@ COPY arch-$TARGETARCH whoami
}, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "out.tar"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "out.tar"))
require.NoError(t, err)
m, err := testutil.ReadTarToMap(dt, false)
@@ -1540,29 +1500,25 @@ COPY arch-$TARGETARCH whoami
// tonistiigi/fsutil#46
func testContextChangeDirToFile(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch
COPY foo /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateDir("foo", 0700),
fstest.CreateFile("foo/bar", []byte(`contents`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1570,16 +1526,14 @@ COPY foo /
}, nil)
require.NoError(t, err)
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte(`contents2`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -1588,9 +1542,6 @@ COPY foo /
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1598,35 +1549,31 @@ COPY foo /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "contents2", string(dt))
}
func testNoSnapshotLeak(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch
COPY foo /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte(`contents`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1638,9 +1585,6 @@ COPY foo /
require.NoError(t, err)
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1657,7 +1601,6 @@ COPY foo /
// #1197
func testCopyFollowAllSymlinks(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch
@@ -1665,27 +1608,20 @@ COPY foo /
COPY foo/sub bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
fstest.CreateDir("foo", 0700),
fstest.Symlink("../bar", "foo/sub"),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1696,7 +1632,6 @@ COPY foo/sub bar
func testCopySymlinks(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch
@@ -1704,7 +1639,8 @@ COPY foo /
COPY sub/l* alllinks/
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
fstest.Symlink("bar", "foo"),
@@ -1717,15 +1653,12 @@ COPY sub/l* alllinks/
fstest.CreateFile("sub/baz", []byte(`baz-contents`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -1734,9 +1667,6 @@ COPY sub/l* alllinks/
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -1744,19 +1674,19 @@ COPY sub/l* alllinks/
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "bar-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/l0"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "alllinks/l0"))
require.NoError(t, err)
require.Equal(t, "subfile-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/lfile"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "alllinks/lfile"))
require.NoError(t, err)
require.Equal(t, "lfile-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/l1"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "alllinks/l1"))
require.NoError(t, err)
require.Equal(t, "baz-contents", string(dt))
}
@@ -1771,11 +1701,9 @@ FROM scratch
COPY --from=0 /foo /foo
`)
- srcDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(srcDir)
+ srcDir := t.TempDir()
- err = ioutil.WriteFile(filepath.Join(srcDir, "Dockerfile"), dockerfile, 0600)
+ err := os.WriteFile(filepath.Join(srcDir, "Dockerfile"), dockerfile, 0600)
require.NoError(t, err)
resp := httpserver.Response{
@@ -1788,9 +1716,7 @@ COPY --from=0 /foo /foo
})
defer server.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -1810,7 +1736,7 @@ COPY --from=0 /foo /foo
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
}
@@ -1828,11 +1754,11 @@ FROM scratch
CMD ["test"]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -1861,11 +1787,11 @@ SHELL ["ls"]
ENTRYPOINT my entrypoint
`)
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
target = "docker.io/moby/cmdoverridetest2:latest"
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
@@ -1920,11 +1846,11 @@ FROM scratch
LABEL foo=bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -1953,12 +1879,12 @@ LABEL bar=baz
COPY foo .
`)
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo-contents"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
target = "docker.io/moby/testpullscratch2:latest"
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
@@ -2012,9 +1938,7 @@ COPY foo .
def, err := echo.Marshal(sb.Context())
require.NoError(t, err)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Solve(sb.Context(), def, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -2030,7 +1954,7 @@ COPY foo .
}, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "foo0", string(dt))
}
@@ -2043,11 +1967,11 @@ ARG tag=nosuchtag
FROM busybox:${tag}
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -2066,7 +1990,6 @@ FROM busybox:${tag}
}
func testDockerfileDirs(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -2080,12 +2003,12 @@ func testDockerfileDirs(t *testing.T, sb integration.Sandbox) {
RUN cmp -s foo foo3
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("bar"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace := f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
@@ -2108,17 +2031,17 @@ func testDockerfileDirs(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
// different context and dockerfile directories
- dir1, err := tmpdir(
+ dir1, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir1)
- dir2, err := tmpdir(
+ dir2, err := integration.Tmpdir(
+ t,
fstest.CreateFile("foo", []byte("bar"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir2)
args, trace = f.DFCmdArgs(dir2, dir1)
defer os.RemoveAll(trace)
@@ -2135,7 +2058,6 @@ func testDockerfileDirs(t *testing.T, sb integration.Sandbox) {
}
func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
dockerfile := []byte(`
@@ -2143,11 +2065,11 @@ func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) {
RUN invalidcmd
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace := f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
@@ -2162,7 +2084,6 @@ func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) {
}
func testDockerfileInvalidInstruction(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
dockerfile := []byte(`
@@ -2170,11 +2091,11 @@ func testDockerfileInvalidInstruction(t *testing.T, sb integration.Sandbox) {
FNTRYPOINT ["/bin/sh", "-c", "echo invalidinstruction"]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -2193,7 +2114,6 @@ func testDockerfileInvalidInstruction(t *testing.T, sb integration.Sandbox) {
}
func testDockerfileADDFromURL(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -2221,24 +2141,22 @@ FROM scratch
ADD %s /dest/
`, server.URL+"/foo"))
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace := f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
- destDir, err := tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
- cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
+ cmd := sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir))
err = cmd.Run()
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "dest/foo"))
require.NoError(t, err)
require.Equal(t, []byte("content1"), dt)
@@ -2248,25 +2166,23 @@ FROM scratch
ADD %s /dest/
`, server.URL+"/"))
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace = f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
- destDir, err = tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
- cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
+ cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir))
err = cmd.Run()
require.NoError(t, err)
destFile := filepath.Join(destDir, "dest/__unnamed__")
- dt, err = ioutil.ReadFile(destFile)
+ dt, err = os.ReadFile(destFile)
require.NoError(t, err)
require.Equal(t, []byte("content2"), dt)
@@ -2276,7 +2192,6 @@ ADD %s /dest/
}
func testDockerfileAddArchive(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -2300,24 +2215,22 @@ FROM scratch
ADD t.tar /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("t.tar", buf.Bytes(), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace := f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
- destDir, err := tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
- cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
+ cmd := sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir))
require.NoError(t, cmd.Run())
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, expectedContent, dt)
@@ -2334,24 +2247,22 @@ FROM scratch
ADD t.tar.gz /
`)
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace = f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
- destDir, err = tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
- cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
+ cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir))
require.NoError(t, cmd.Run())
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, expectedContent, dt)
@@ -2361,24 +2272,22 @@ FROM scratch
COPY t.tar.gz /
`)
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace = f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
- destDir, err = tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
- cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
+ cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir))
require.NoError(t, cmd.Run())
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "t.tar.gz"))
require.NoError(t, err)
require.Equal(t, buf2.Bytes(), dt)
@@ -2398,23 +2307,21 @@ FROM scratch
ADD %s /
`, server.URL+"/t.tar.gz"))
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace = f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
- destDir, err = tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
- cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
+ cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir))
require.NoError(t, cmd.Run())
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "t.tar.gz"))
require.NoError(t, err)
require.Equal(t, buf2.Bytes(), dt)
@@ -2424,23 +2331,21 @@ FROM scratch
ADD %s /newname.tar.gz
`, server.URL+"/t.tar.gz"))
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace = f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
- destDir, err = tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
- cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
+ cmd = sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir))
require.NoError(t, cmd.Run())
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "newname.tar.gz"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "newname.tar.gz"))
require.NoError(t, err)
require.Equal(t, buf2.Bytes(), dt)
}
@@ -2483,17 +2388,15 @@ FROM scratch
ADD *.tar /dest
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("t.tar", buf.Bytes(), 0600),
fstest.CreateFile("b.tar", buf2.Bytes(), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -2513,18 +2416,17 @@ ADD *.tar /dest
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "dest/foo"))
require.NoError(t, err)
require.Equal(t, "content0", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/bar"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "dest/bar"))
require.NoError(t, err)
require.Equal(t, "content1", string(dt))
}
func testDockerfileAddChownExpand(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox
@@ -2534,12 +2436,12 @@ ADD --chown=${owner}:${group} foo /
RUN [ "$(stat -c "%u %G" /foo)" == "1000 nobody" ]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte(`foo-contents`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -2547,8 +2449,7 @@ RUN [ "$(stat -c "%u %G" /foo)" == "1000 nobody" ]
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- "build-arg:group": "nobody",
+ "build-arg:group": "nobody",
},
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
@@ -2559,7 +2460,6 @@ RUN [ "$(stat -c "%u %G" /foo)" == "1000 nobody" ]
}
func testSymlinkDestination(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -2582,25 +2482,23 @@ ADD t.tar /
COPY foo /symlink/
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", expectedContent, 0600),
fstest.CreateFile("t.tar", buf.Bytes(), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace := f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
- destDir, err := tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
- cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
+ cmd := sb.Cmd(args + fmt.Sprintf(" --output type=local,dest=%s", destDir))
require.NoError(t, cmd.Run())
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "tmp/symlink-target/foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "tmp/symlink-target/foo"))
require.NoError(t, err)
require.Equal(t, expectedContent, dt)
}
@@ -2618,17 +2516,17 @@ FROM scratch
ENV foo=bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace := f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
target := "example.com/moby/dockerfilescratch:test"
- cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target)
+ cmd := sb.Cmd(args + " --output type=image,name=" + target)
err = cmd.Run()
require.NoError(t, err)
@@ -2673,8 +2571,7 @@ ENV foo=bar
}
func testExposeExpansion(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
-
+ integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter)
f := getFrontend(t, sb)
dockerfile := []byte(`
@@ -2684,11 +2581,11 @@ EXPOSE $PORTS
EXPOSE 5000
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -2764,7 +2661,8 @@ Dockerfile
.dockerignore
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte(`foo-contents`), 0600),
fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
@@ -2773,15 +2671,12 @@ Dockerfile
fstest.CreateFile(".dockerignore", dockerignore, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -2797,7 +2692,7 @@ Dockerfile
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
@@ -2817,7 +2712,7 @@ Dockerfile
require.Error(t, err)
require.True(t, errors.Is(err, os.ErrNotExist))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "bay"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "bay"))
require.NoError(t, err)
require.Equal(t, "bay-contents", string(dt))
}
@@ -2830,12 +2725,12 @@ FROM scratch
COPY . .
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile(".dockerignore", []byte("!\n"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
ctx, cancel := context.WithTimeout(sb.Context(), 15*time.Second)
defer cancel()
@@ -2866,11 +2761,11 @@ func testDockerfileLowercase(t *testing.T, sb integration.Sandbox) {
dockerfile := []byte(`FROM scratch
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
ctx := sb.Context()
@@ -2888,7 +2783,6 @@ func testDockerfileLowercase(t *testing.T, sb integration.Sandbox) {
}
func testExportedHistory(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -2905,18 +2799,20 @@ RUN echo bar > foo4
RUN ["ls"]
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("contents0"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
args, trace := f.DFCmdArgs(dir, dir)
defer os.RemoveAll(trace)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter)
+
target := "example.com/moby/dockerfilescratch:test"
- cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target)
+ cmd := sb.Cmd(args + " --output type=image,name=" + target)
require.NoError(t, cmd.Run())
// TODO: expose this test to OCI worker
@@ -2966,17 +2862,8 @@ RUN ["ls"]
require.NotNil(t, ociimg.History[6].Created)
}
-func skipDockerd(t *testing.T, sb integration.Sandbox) {
- // TODO: remove me once dockerd supports the image and exporter.
- t.Helper()
- if os.Getenv("TEST_DOCKERD") == "1" {
- t.Skip("dockerd missing a required exporter, cache exporter, or entitlement")
- }
-}
-
func testUser(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
-
+ integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter)
f := getFrontend(t, sb)
dockerfile := []byte(`
@@ -3039,19 +2926,17 @@ COPY --from=base /out /
USER nobody
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3067,11 +2952,11 @@ USER nobody
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "rootuser"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "rootuser"))
require.NoError(t, err)
require.Equal(t, "root\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "daemonuser"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "daemonuser"))
require.NoError(t, err)
require.Equal(t, "daemon\n", string(dt))
@@ -3120,9 +3005,45 @@ USER nobody
require.Equal(t, "nobody", ociimg.Config.User)
}
+// testUserAdditionalGids ensures that the primary GID is also included in the additional GID list.
+// CVE-2023-25173: https://github.com/advisories/GHSA-hmfx-3pcx-653p
+func testUserAdditionalGids(t *testing.T, sb integration.Sandbox) {
+ f := getFrontend(t, sb)
+
+ dockerfile := []byte(`
+# Mimics the tests in https://github.com/containerd/containerd/commit/3eda46af12b1deedab3d0802adb2e81cb3521950
+FROM busybox
+SHELL ["/bin/sh", "-euxc"]
+RUN [ "$(id)" = "uid=0(root) gid=0(root) groups=0(root),10(wheel)" ]
+USER 1234
+RUN [ "$(id)" = "uid=1234 gid=0(root) groups=0(root)" ]
+USER 1234:1234
+RUN [ "$(id)" = "uid=1234 gid=1234 groups=1234" ]
+USER daemon
+RUN [ "$(id)" = "uid=1(daemon) gid=1(daemon) groups=1(daemon)" ]
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
+ require.NoError(t, err)
+}
+
func testCopyChown(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox AS base
@@ -3139,22 +3060,20 @@ FROM scratch
COPY --from=base /out /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte(`foo-contents`), 0600),
fstest.CreateDir("bar", 0700),
fstest.CreateFile("bar/sub", nil, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3164,8 +3083,7 @@ COPY --from=base /out /
},
},
FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- "build-arg:group": "nobody",
+ "build-arg:group": "nobody",
},
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
@@ -3174,22 +3092,21 @@ COPY --from=base /out /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "fooowner"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "fooowner"))
require.NoError(t, err)
require.Equal(t, "daemon daemon\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "subowner"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "subowner"))
require.NoError(t, err)
require.Equal(t, "1000 nobody\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "foobisowner"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "foobisowner"))
require.NoError(t, err)
require.Equal(t, "1000 nobody\n", string(dt))
}
func testCopyChmod(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox AS base
@@ -3206,22 +3123,19 @@ FROM scratch
COPY --from=base /out /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte(`foo-contents`), 0600),
fstest.CreateFile("bar", []byte(`bar-contents`), 0700),
)
-
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3230,37 +3144,29 @@ COPY --from=base /out /
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
},
}, nil)
- if !isFileOp {
- require.Contains(t, err.Error(), "chmod is not supported")
- return
- }
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "fooperm"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "fooperm"))
require.NoError(t, err)
require.Equal(t, "0644\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "barperm"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "barperm"))
require.NoError(t, err)
require.Equal(t, "0777\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "foobisperm"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "foobisperm"))
require.NoError(t, err)
require.Equal(t, "0000\n", string(dt))
}
func testCopyOverrideFiles(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch AS base
@@ -3271,7 +3177,8 @@ COPY files/foo.go dest/foo.go
COPY files dest
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateDir("sub", 0700),
fstest.CreateDir("sub/dir1", 0700),
@@ -3281,15 +3188,12 @@ COPY files dest
fstest.CreateFile("files/foo.go", []byte(`foo.go-contents`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3298,9 +3202,6 @@ COPY files dest
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -3308,18 +3209,17 @@ COPY files dest
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "sub/dir1/dir2/foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "sub/dir1/dir2/foo"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/foo.go"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "dest/foo.go"))
require.NoError(t, err)
require.Equal(t, "foo.go-contents", string(dt))
}
func testCopyVarSubstitution(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch AS base
@@ -3327,21 +3227,19 @@ ENV FOO bar
COPY $FOO baz
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3350,9 +3248,6 @@ COPY $FOO baz
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -3360,14 +3255,13 @@ COPY $FOO baz
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "baz"))
require.NoError(t, err)
require.Equal(t, "bar-contents", string(dt))
}
func testCopyWildcards(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch AS base
@@ -3382,7 +3276,8 @@ COPY sub/dir1/. subdest5
COPY sub/dir1 subdest6
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo.go", []byte(`foo-contents`), 0600),
fstest.CreateFile("bar.go", []byte(`bar-contents`), 0600),
@@ -3392,15 +3287,12 @@ COPY sub/dir1 subdest6
fstest.CreateFile("sub/dir1/dir2/foo", []byte(`foo-contents`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3409,9 +3301,6 @@ COPY sub/dir1 subdest6
OutputDir: destDir,
},
},
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -3419,52 +3308,49 @@ COPY sub/dir1 subdest6
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "gofiles/foo.go"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "gofiles/foo.go"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "gofiles/bar.go"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "gofiles/bar.go"))
require.NoError(t, err)
require.Equal(t, "bar-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo2.go"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "foo2.go"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
- if isFileOp { // non-fileop implementation is historically buggy
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir2/foo"))
- require.NoError(t, err)
- require.Equal(t, "foo-contents", string(dt))
- }
+ dt, err = os.ReadFile(filepath.Join(destDir, "subdest/dir2/foo"))
+ require.NoError(t, err)
+ require.Equal(t, "foo-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest2/foo"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "subdest2/foo"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest3/bar"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "subdest3/bar"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "all/foo.go"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "all/foo.go"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest4/dir2/foo"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "subdest4/dir2/foo"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest5/dir2/foo"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "subdest5/dir2/foo"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest6/dir2/foo"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "subdest6/dir2/foo"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
}
func testCopyRelative(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM busybox
@@ -3490,21 +3376,18 @@ COPY foo ../
RUN sh -c "[ $(cat /test5/foo) = 'hello' ]"
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte(`hello`), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
- FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- },
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
@@ -3514,7 +3397,6 @@ RUN sh -c "[ $(cat /test5/foo) = 'hello' ]"
}
func testAddURLChmod(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
f.RequiresBuildctl(t)
@@ -3540,19 +3422,17 @@ FROM scratch
COPY --from=build /dest /dest
`, server.URL+"/foo"))
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := tmpdir()
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3568,7 +3448,7 @@ COPY --from=build /dest /dest
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "dest"))
require.NoError(t, err)
require.Equal(t, []byte("0644\n0755\n0413\n"), dt)
}
@@ -3576,9 +3456,7 @@ COPY --from=build /dest /dest
func testDockerfileFromGit(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- gitDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(gitDir)
+ gitDir := t.TempDir()
dockerfile := `
FROM busybox AS build
@@ -3587,7 +3465,7 @@ FROM scratch
COPY --from=build foo bar
`
- err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
+ err := os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
require.NoError(t, err)
err = runShell(gitDir,
@@ -3604,7 +3482,7 @@ COPY --from=build foo bar
COPY --from=build foo bar2
`
- err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
+ err = os.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
require.NoError(t, err)
err = runShell(gitDir,
@@ -3617,9 +3495,7 @@ COPY --from=build foo bar2
server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir))))
defer server.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -3638,7 +3514,7 @@ COPY --from=build foo bar2
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "bar"))
require.NoError(t, err)
require.Equal(t, "fromgit", string(dt))
@@ -3647,9 +3523,7 @@ COPY --from=build foo bar2
require.True(t, errors.Is(err, os.ErrNotExist))
// second request from master branch contains both files
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -3664,11 +3538,11 @@ COPY --from=build foo bar2
}, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "bar"))
require.NoError(t, err)
require.Equal(t, "fromgit", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar2"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "bar2"))
require.NoError(t, err)
require.Equal(t, "fromgit", string(dt))
}
@@ -3709,9 +3583,7 @@ COPY foo bar
})
defer server.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -3731,7 +3603,7 @@ COPY foo bar
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "bar"))
require.NoError(t, err)
require.Equal(t, "foo-contents", string(dt))
}
@@ -3744,19 +3616,17 @@ FROM scratch
COPY --from=busybox /etc/passwd test
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3772,7 +3642,7 @@ COPY --from=busybox /etc/passwd test
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "test"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "test"))
require.NoError(t, err)
require.Contains(t, string(dt), "root")
@@ -3780,21 +3650,19 @@ COPY --from=busybox /etc/passwd test
dockerfile = []byte(`
FROM busybox AS golang
-RUN mkdir /usr/bin && echo -n foo > /usr/bin/go
+RUN mkdir -p /usr/bin && echo -n foo > /usr/bin/go
FROM scratch
COPY --from=golang /usr/bin/go go
`)
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3810,7 +3678,7 @@ COPY --from=golang /usr/bin/go go
}, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "go"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "go"))
require.NoError(t, err)
require.Contains(t, string(dt), "foo")
}
@@ -3826,20 +3694,18 @@ COPY --from=staGE0 bar baz
FROM scratch
COPY --from=stage1 baz bax
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo-contents"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -3858,33 +3724,29 @@ COPY --from=stage1 baz bax
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "baz"))
require.NoError(t, err)
require.Contains(t, string(dt), "foo-contents")
}
func testLabels(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter)
f := getFrontend(t, sb)
dockerfile := []byte(`
FROM scratch
LABEL foo=bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
target := "example.com/moby/dockerfilelabels:test"
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -3940,7 +3802,6 @@ LABEL foo=bar
// #2008
func testWildcardRenameCache(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
f := getFrontend(t, sb)
dockerfile := []byte(`
@@ -3948,21 +3809,17 @@ FROM alpine
COPY file* /files/
RUN ls /files/file1
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("file1", []byte("foo"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
@@ -3985,6 +3842,7 @@ RUN ls /files/file1
}
func testOnBuildCleared(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
f := getFrontend(t, sb)
registry, err := sb.NewRegistry()
@@ -3998,11 +3856,11 @@ FROM busybox
ONBUILD RUN mkdir -p /out && echo -n 11 >> /out/foo
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -4031,11 +3889,11 @@ ONBUILD RUN mkdir -p /out && echo -n 11 >> /out/foo
FROM %s
`, target))
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
target2 := registry + "/buildkit/testonbuild:child"
@@ -4062,15 +3920,13 @@ ONBUILD RUN mkdir -p /out && echo -n 11 >> /out/foo
COPY --from=base /out /
`, target2))
- dir, err = tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -4086,13 +3942,13 @@ ONBUILD RUN mkdir -p /out && echo -n 11 >> /out/foo
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, "11", string(dt))
}
func testCacheMultiPlatformImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
f := getFrontend(t, sb)
registry, err := sb.NewRegistry()
@@ -4110,11 +3966,11 @@ COPY --from=base unique /
COPY --from=base arch /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -4161,14 +4017,14 @@ COPY --from=base arch /
desc, provider, err := contentutil.ProviderFromRef(target + "-img")
require.NoError(t, err)
- imgMap, err := readIndex(sb.Context(), provider, desc)
+ imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
require.NoError(t, err)
- require.Equal(t, 2, len(imgMap))
+ require.Equal(t, 2, len(imgs.Images))
- require.Equal(t, "amd64", string(imgMap["linux/amd64"].layers[1]["arch"].Data))
- dtamd := imgMap["linux/amd64"].layers[0]["unique"].Data
- dtarm := imgMap["linux/arm/v7"].layers[0]["unique"].Data
+ require.Equal(t, "amd64", string(imgs.Find("linux/amd64").Layers[1]["arch"].Data))
+ dtamd := imgs.Find("linux/amd64").Layers[0]["unique"].Data
+ dtarm := imgs.Find("linux/arm/v7").Layers[0]["unique"].Data
require.NotEqual(t, dtamd, dtarm)
for i := 0; i < 2; i++ {
@@ -4201,21 +4057,21 @@ COPY --from=base arch /
require.Equal(t, desc.Digest, desc2.Digest)
- imgMap, err = readIndex(sb.Context(), provider, desc2)
+ imgs, err = testutil.ReadImages(sb.Context(), provider, desc2)
require.NoError(t, err)
- require.Equal(t, 2, len(imgMap))
+ require.Equal(t, 2, len(imgs.Images))
- require.Equal(t, "arm", string(imgMap["linux/arm/v7"].layers[1]["arch"].Data))
- dtamd2 := imgMap["linux/amd64"].layers[0]["unique"].Data
- dtarm2 := imgMap["linux/arm/v7"].layers[0]["unique"].Data
+ require.Equal(t, "arm", string(imgs.Find("linux/arm/v7").Layers[1]["arch"].Data))
+ dtamd2 := imgs.Find("linux/amd64").Layers[0]["unique"].Data
+ dtarm2 := imgs.Find("linux/arm/v7").Layers[0]["unique"].Data
require.Equal(t, string(dtamd), string(dtamd2))
require.Equal(t, string(dtarm), string(dtarm2))
}
}
func testCacheImportExport(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureCacheExport)
f := getFrontend(t, sb)
registry, err := sb.NewRegistry()
@@ -4234,20 +4090,18 @@ COPY --from=base const /
COPY --from=base unique /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foobar"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
target := registry + "/buildkit/testexportdf:latest"
@@ -4271,18 +4125,16 @@ COPY --from=base unique /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "const"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "const"))
require.NoError(t, err)
require.Equal(t, "foobar", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
ensurePruneAll(t, c, sb)
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -4301,21 +4153,17 @@ COPY --from=base unique /
}, nil)
require.NoError(t, err)
- dt2, err := ioutil.ReadFile(filepath.Join(destDir, "const"))
+ dt2, err := os.ReadFile(filepath.Join(destDir, "const"))
require.NoError(t, err)
require.Equal(t, "foobar", string(dt2))
- dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ dt2, err = os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
require.Equal(t, string(dt), string(dt2))
-
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
}
func testReproducibleIDs(t *testing.T, sb integration.Sandbox) {
- skipDockerd(t, sb)
+ integration.CheckFeatureCompat(t, sb, integration.FeatureImageExporter)
f := getFrontend(t, sb)
dockerfile := []byte(`
@@ -4324,21 +4172,17 @@ ENV foo=bar
COPY foo /
RUN echo bar > bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foo-contents"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
target := "example.com/moby/dockerfileids:test"
opt := client.SolveOpt{
FrontendAttrs: map[string]string{},
@@ -4405,21 +4249,17 @@ COPY foo /
RUN echo bar > bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("foobar"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
target := "example.com/moby/dockerfileexpids:test"
cacheTarget := registry + "/test/dockerfileexpids:cache"
opt := client.SolveOpt{
@@ -4487,19 +4327,17 @@ FROM scratch
COPY --from=s0 unique /
COPY --from=s1 unique2 /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
opt := client.SolveOpt{
FrontendAttrs: map[string]string{},
@@ -4518,9 +4356,7 @@ COPY --from=s1 unique2 /
_, err = f.Solve(sb.Context(), c, opt, nil)
require.NoError(t, err)
- destDir2, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir2 := t.TempDir()
opt.FrontendAttrs["no-cache"] = ""
opt.Exports[0].OutputDir = destDir2
@@ -4528,24 +4364,22 @@ COPY --from=s1 unique2 /
_, err = f.Solve(sb.Context(), c, opt, nil)
require.NoError(t, err)
- unique1Dir1, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
+ unique1Dir1, err := os.ReadFile(filepath.Join(destDir, "unique"))
require.NoError(t, err)
- unique1Dir2, err := ioutil.ReadFile(filepath.Join(destDir2, "unique"))
+ unique1Dir2, err := os.ReadFile(filepath.Join(destDir2, "unique"))
require.NoError(t, err)
- unique2Dir1, err := ioutil.ReadFile(filepath.Join(destDir, "unique2"))
+ unique2Dir1, err := os.ReadFile(filepath.Join(destDir, "unique2"))
require.NoError(t, err)
- unique2Dir2, err := ioutil.ReadFile(filepath.Join(destDir2, "unique2"))
+ unique2Dir2, err := os.ReadFile(filepath.Join(destDir2, "unique2"))
require.NoError(t, err)
require.NotEqual(t, string(unique1Dir1), string(unique1Dir2))
require.NotEqual(t, string(unique2Dir1), string(unique2Dir2))
- destDir3, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir3 := t.TempDir()
opt.FrontendAttrs["no-cache"] = "s1"
opt.Exports[0].OutputDir = destDir3
@@ -4553,10 +4387,10 @@ COPY --from=s1 unique2 /
_, err = f.Solve(sb.Context(), c, opt, nil)
require.NoError(t, err)
- unique1Dir3, err := ioutil.ReadFile(filepath.Join(destDir3, "unique"))
+ unique1Dir3, err := os.ReadFile(filepath.Join(destDir3, "unique"))
require.NoError(t, err)
- unique2Dir3, err := ioutil.ReadFile(filepath.Join(destDir3, "unique2"))
+ unique2Dir3, err := os.ReadFile(filepath.Join(destDir3, "unique2"))
require.NoError(t, err)
require.Equal(t, string(unique1Dir2), string(unique1Dir3))
@@ -4573,21 +4407,19 @@ FROM build-${TARGETOS}
COPY foo2 bar2
`, runtime.GOOS))
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("d0"), 0600),
fstest.CreateFile("foo2", []byte("d1"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
opt := client.SolveOpt{
Exports: []client.ExportEntry{
@@ -4605,11 +4437,11 @@ COPY foo2 bar2
_, err = f.Solve(sb.Context(), c, opt, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "bar"))
require.NoError(t, err)
require.Equal(t, "d0", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar2"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "bar2"))
require.NoError(t, err)
require.Equal(t, "d1", string(dt))
}
@@ -4626,19 +4458,17 @@ FROM scratch
COPY --from=build out .
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
opt := client.SolveOpt{
Exports: []client.ExportEntry{
@@ -4660,11 +4490,11 @@ COPY --from=build out .
_, err = f.Solve(sb.Context(), c, opt, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "platform"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "platform"))
require.NoError(t, err)
require.Equal(t, "darwin/ppc64le", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "os"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "os"))
require.NoError(t, err)
require.Equal(t, "freebsd", string(dt))
}
@@ -4682,19 +4512,17 @@ FROM scratch
COPY --from=build /out /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
opt := client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -4717,14 +4545,12 @@ COPY --from=build /out /
_, err = f.Solve(sb.Context(), c, opt, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Equal(t, "hpvalue::npvalue::foocontents::::bazcontent", string(dt))
// repeat with changed default args should match the old cache
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
opt = client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -4746,14 +4572,12 @@ COPY --from=build /out /
_, err = f.Solve(sb.Context(), c, opt, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Equal(t, "hpvalue::npvalue::foocontents::::bazcontent", string(dt))
// changing actual value invalidates cache
- destDir, err = ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir = t.TempDir()
opt = client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -4775,14 +4599,13 @@ COPY --from=build /out /
_, err = f.Solve(sb.Context(), c, opt, nil)
require.NoError(t, err)
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Equal(t, "hpvalue2::::foocontents2::::bazcontent", string(dt))
}
func testTarContext(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
dockerfile := []byte(`
FROM scratch
@@ -4823,8 +4646,7 @@ COPY foo /
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- "context": url,
+ "context": url,
},
Session: []session.Attachable{up},
}, nil)
@@ -4833,7 +4655,6 @@ COPY foo /
func testTarContextExternalDockerfile(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
- isFileOp := getFileOp(t, sb)
foo := []byte("contents")
@@ -4855,11 +4676,11 @@ func testTarContextExternalDockerfile(t *testing.T, sb integration.Sandbox) {
FROM scratch
COPY foo bar
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -4869,16 +4690,13 @@ COPY foo bar
url := up.Add(buf)
// repeat with changed default args should match the old cache
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
- "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
- "context": url,
- "dockerfilekey": builder.DefaultLocalNameDockerfile,
- "contextsubdir": "sub/dir",
+ "context": url,
+ "dockerfilekey": builder.DefaultLocalNameDockerfile,
+ "contextsubdir": "sub/dir",
},
Session: []session.Attachable{up},
LocalDirs: map[string]string{
@@ -4893,7 +4711,7 @@ COPY foo bar
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "bar"))
require.NoError(t, err)
require.Equal(t, string(dt), "contents")
}
@@ -4907,12 +4725,12 @@ func testFrontendUseForwardedSolveResults(t *testing.T, sb integration.Sandbox)
FROM scratch
COPY foo foo2
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("data"), 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
res, err := c.Solve(ctx, gateway.SolveRequest{
@@ -4946,9 +4764,7 @@ COPY foo foo2
})
}
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = c.Build(sb.Context(), client.SolveOpt{
Exports: []client.ExportEntry{
@@ -4964,7 +4780,7 @@ COPY foo foo2
}, "", frontend, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo3"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "foo3"))
require.NoError(t, err)
require.Equal(t, dt, []byte("data"))
}
@@ -4976,9 +4792,7 @@ func testFrontendInputs(t *testing.T, sb integration.Sandbox) {
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
outMount := llb.Image("busybox").Run(
llb.Shlex(`sh -c "cat /dev/urandom | head -c 100 | sha256sum > /out/foo"`),
@@ -4997,7 +4811,7 @@ func testFrontendInputs(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)
- expected, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ expected, err := os.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
dockerfile := []byte(`
@@ -5005,11 +4819,11 @@ FROM scratch
COPY foo foo2
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
Exports: []client.ExportEntry{
@@ -5027,13 +4841,16 @@ COPY foo foo2
}, nil)
require.NoError(t, err)
- actual, err := ioutil.ReadFile(filepath.Join(destDir, "foo2"))
+ actual, err := os.ReadFile(filepath.Join(destDir, "foo2"))
require.NoError(t, err)
require.Equal(t, expected, actual)
}
func testFrontendSubrequests(t *testing.T, sb integration.Sandbox) {
f := getFrontend(t, sb)
+ if _, ok := f.(*clientFrontend); !ok {
+ t.Skip("only test with client frontend")
+ }
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -5044,15 +4861,11 @@ FROM scratch
COPY Dockerfile Dockerfile
`)
- if gf, ok := f.(*gatewayFrontend); ok {
- dockerfile = []byte(fmt.Sprintf("#syntax=%s\n\n%s", gf.gw, dockerfile))
- }
-
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
called := false
@@ -5070,6 +4883,7 @@ COPY Dockerfile Dockerfile
require.Equal(t, subrequests.RequestType("rpc"), req.Type)
require.NotEqual(t, req.Version, "")
require.True(t, len(req.Metadata) > 0)
+ require.Equal(t, "result.json", req.Metadata[0].Name)
}
}
require.True(t, hasDescribe)
@@ -5121,11 +4935,11 @@ RUN echo $HOSTNAME | grep foo
RUN echo $(hostname) | grep foo
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
@@ -5179,19 +4993,17 @@ FROM scratch
COPY --from=base /shmsize /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -5210,7 +5022,7 @@ COPY --from=base /shmsize /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "shmsize"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "shmsize"))
require.NoError(t, err)
require.Contains(t, string(dt), `size=131072k`)
}
@@ -5224,19 +5036,17 @@ FROM scratch
COPY --from=base /ulimit /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -5255,7 +5065,7 @@ COPY --from=base /ulimit /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "ulimit"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "ulimit"))
require.NoError(t, err)
require.Equal(t, `1062`, strings.TrimSpace(string(dt)))
}
@@ -5273,19 +5083,17 @@ FROM scratch
COPY --from=base /out /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
c, err := client.New(sb.Context(), sb.Address())
require.NoError(t, err)
defer c.Close()
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
@@ -5304,7 +5112,7 @@ COPY --from=base /out /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.Contains(t, strings.TrimSpace(string(dt)), `/foocgroup/buildkit/`)
}
@@ -5323,20 +5131,19 @@ FROM scratch
COPY --from=base /out /
`)
- dir, err := tmpdir(
+ dir, err := integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
f := getFrontend(t, sb)
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
+ destDir := t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
+ // Make sure image resolution works as expected, do not add a tag or locator.
"context:busybox": "docker-image://alpine",
},
LocalDirs: map[string]string{
@@ -5352,55 +5159,77 @@ COPY --from=base /out /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.True(t, len(dt) > 0)
-}
-
-func testNamedLocalContext(t *testing.T, sb integration.Sandbox) {
- ctx := sb.Context()
- c, err := client.New(ctx, sb.Address())
- require.NoError(t, err)
- defer c.Close()
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
- dockerfile := []byte(`
-FROM busybox AS base
-RUN cat /etc/alpine-release > /out
-FROM scratch
-COPY --from=base /o* /
+ // Now test with an image with custom envs
+ dockerfile = []byte(`
+FROM alpine:latest
+ENV PATH=/foobar:$PATH
+ENV FOOBAR=foobar
`)
- dir, err := tmpdir(
+ dir, err = integration.Tmpdir(
+ t,
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
- outf := []byte(`dummy-result`)
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+ target := registry + "/buildkit/testnamedimagecontext:latest"
- dir2, err := tmpdir(
- fstest.CreateFile("out", outf, 0600),
- fstest.CreateFile("out2", outf, 0600),
- fstest.CreateFile(".dockerignore", []byte("out2\n"), 0600),
- )
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
require.NoError(t, err)
- defer os.RemoveAll(dir2)
- f := getFrontend(t, sb)
+ dockerfile = []byte(`
+FROM busybox AS base
+RUN cat /etc/alpine-release > /out
+RUN env | grep PATH > /env_path
+RUN env | grep FOOBAR > /env_foobar
+FROM scratch
+COPY --from=base /out /
+COPY --from=base /env_path /
+COPY --from=base /env_foobar /
+ `)
- destDir, err := ioutil.TempDir("", "buildkit")
+ dir, err = integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
require.NoError(t, err)
- defer os.RemoveAll(destDir)
+
+ f = getFrontend(t, sb)
+
+ destDir = t.TempDir()
_, err = f.Solve(sb.Context(), c, client.SolveOpt{
FrontendAttrs: map[string]string{
- "context:base": "local:basedir",
+ "context:busybox": "docker-image://" + target,
},
LocalDirs: map[string]string{
builder.DefaultLocalNameDockerfile: dir,
builder.DefaultLocalNameContext: dir,
- "basedir": dir2,
},
Exports: []client.ExportEntry{
{
@@ -5411,284 +5240,1388 @@ COPY --from=base /o* /
}, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dt, err = os.ReadFile(filepath.Join(destDir, "out"))
require.NoError(t, err)
require.True(t, len(dt) > 0)
- _, err = ioutil.ReadFile(filepath.Join(destDir, "out2"))
- require.Error(t, err)
- require.True(t, errors.Is(err, os.ErrNotExist))
+ dt, err = os.ReadFile(filepath.Join(destDir, "env_foobar"))
+ require.NoError(t, err)
+ require.Equal(t, "FOOBAR=foobar", strings.TrimSpace(string(dt)))
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "env_path"))
+ require.NoError(t, err)
+ require.Contains(t, string(dt), "/foobar:")
}
-func testNamedInputContext(t *testing.T, sb integration.Sandbox) {
+func testNamedImageContextPlatform(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
ctx := sb.Context()
c, err := client.New(ctx, sb.Address())
require.NoError(t, err)
defer c.Close()
- dockerfile := []byte(`
-FROM alpine
-ENV FOO=bar
-RUN echo first > /out
-`)
-
- dir, err := tmpdir(
- fstest.CreateFile("Dockerfile", dockerfile, 0600),
- )
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- defer os.RemoveAll(dir)
- dockerfile2 := []byte(`
-FROM base AS build
-RUN echo "foo is $FOO" > /foo
-FROM scratch
-COPY --from=build /foo /out /
-`)
+ // Build a base image and force buildkit to generate a manifest list.
+ dockerfile := []byte(`FROM --platform=$BUILDPLATFORM alpine:latest`)
+ target := registry + "/buildkit/testnamedimagecontextplatform:latest"
- dir2, err := tmpdir(
- fstest.CreateFile("Dockerfile", dockerfile2, 0600),
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
f := getFrontend(t, sb)
- b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
- res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{})
- if err != nil {
- return nil, err
- }
- ref, err := res.SingleRef()
- if err != nil {
- return nil, err
- }
- st, err := ref.ToState()
- if err != nil {
- return nil, err
- }
-
- def, err := st.Marshal(ctx)
- if err != nil {
- return nil, err
- }
-
- dt, ok := res.Metadata["containerimage.config"]
- if !ok {
- return nil, errors.Errorf("no containerimage.config in metadata")
- }
-
- dt, err = json.Marshal(map[string][]byte{
- "containerimage.config": dt,
- })
- if err != nil {
- return nil, err
- }
-
- res, err = f.SolveGateway(ctx, c, gateway.SolveRequest{
- FrontendOpt: map[string]string{
- "dockerfilekey": builder.DefaultLocalNameDockerfile + "2",
- "context:base": "input:base",
- "input-metadata:base": string(dt),
- },
- FrontendInputs: map[string]*pb.Definition{
- "base": def.ToPB(),
- },
- })
- if err != nil {
- return nil, err
- }
- return res, nil
- }
-
- product := "buildkit_test"
-
- destDir, err := ioutil.TempDir("", "buildkit")
- require.NoError(t, err)
- defer os.RemoveAll(destDir)
-
- _, err = c.Build(ctx, client.SolveOpt{
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "build-arg:BUILDKIT_MULTI_PLATFORM": "true",
+ },
LocalDirs: map[string]string{
- builder.DefaultLocalNameDockerfile: dir,
- builder.DefaultLocalNameContext: dir,
- builder.DefaultLocalNameDockerfile + "2": dir2,
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
},
Exports: []client.ExportEntry{
{
- Type: client.ExporterLocal,
- OutputDir: destDir,
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
},
},
- }, product, b, nil)
+ }, nil)
require.NoError(t, err)
- dt, err := ioutil.ReadFile(filepath.Join(destDir, "out"))
+ dockerfile = []byte(`
+FROM --platform=$BUILDPLATFORM busybox AS target
+RUN echo hello
+`)
+
+ dir, err = integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
require.NoError(t, err)
- require.Equal(t, "first\n", string(dt))
- dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo"))
+ f = getFrontend(t, sb)
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "context:busybox": "docker-image://" + target,
+ // a random platform that will never exist, so it cannot conflict with the build machine;
+ // we specifically want to make sure that the platform chosen for the image source is the one in the Dockerfile, not the target platform.
+ "platform": "darwin/ppc64le",
+ },
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ }, nil)
require.NoError(t, err)
- require.Equal(t, "foo is bar\n", string(dt))
}
-func testNamedMultiplatformInputContext(t *testing.T, sb integration.Sandbox) {
+func testNamedImageContextTimestamps(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush)
ctx := sb.Context()
c, err := client.New(ctx, sb.Address())
require.NoError(t, err)
defer c.Close()
- dockerfile := []byte(`
-FROM --platform=$BUILDPLATFORM alpine
-ARG TARGETARCH
-ENV FOO=bar-$TARGETARCH
-RUN echo "foo $TARGETARCH" > /out
-`)
-
- dir, err := tmpdir(
- fstest.CreateFile("Dockerfile", dockerfile, 0600),
- )
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
require.NoError(t, err)
- defer os.RemoveAll(dir)
- dockerfile2 := []byte(`
-FROM base AS build
-RUN echo "foo is $FOO" > /foo
-FROM scratch
-COPY --from=build /foo /out /
-`)
+ f := getFrontend(t, sb)
- dir2, err := tmpdir(
- fstest.CreateFile("Dockerfile", dockerfile2, 0600),
+ dockerfile := []byte(`
+FROM alpine
+RUN echo foo >> /test
+`)
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
- defer os.RemoveAll(dir)
-
- f := getFrontend(t, sb)
- b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
- res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{
- FrontendOpt: map[string]string{
- "platform": "linux/amd64,linux/arm64",
- },
- })
- if err != nil {
+ target := registry + "/buildkit/testnamedimagecontexttimestamps:latest"
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": target,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ desc, provider, err := contentutil.ProviderFromRef(target)
+ require.NoError(t, err)
+ img, err := testutil.ReadImage(sb.Context(), provider, desc)
+ require.NoError(t, err)
+
+ dirDerived, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ targetDerived := registry + "/buildkit/testnamedimagecontexttimestampsderived:latest"
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "context:alpine": "docker-image://" + target,
+ },
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dirDerived,
+ builder.DefaultLocalNameContext: dirDerived,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": targetDerived,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ desc, provider, err = contentutil.ProviderFromRef(targetDerived)
+ require.NoError(t, err)
+ imgDerived, err := testutil.ReadImage(sb.Context(), provider, desc)
+ require.NoError(t, err)
+
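+ // the derived image must get its own creation timestamp rather than inheriting the named context's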
+ require.NotEqual(t, img.Img.Created, imgDerived.Img.Created)
+ diff := imgDerived.Img.Created.Sub(*img.Img.Created)
+ require.Greater(t, diff, time.Duration(0))
+ require.Less(t, diff, 10*time.Minute)
+}
+
+func testNamedLocalContext(t *testing.T, sb integration.Sandbox) {
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ dockerfile := []byte(`
+FROM busybox AS base
+RUN cat /etc/alpine-release > /out
+FROM scratch
+COPY --from=base /o* /
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ outf := []byte(`dummy-result`)
+
+ dir2, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("out", outf, 0600),
+ fstest.CreateFile("out2", outf, 0600),
+ fstest.CreateFile(".dockerignore", []byte("out2\n"), 0600),
+ )
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ destDir := t.TempDir()
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "context:base": "local:basedir",
+ },
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ "basedir": dir2,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
+ require.NoError(t, err)
+ require.True(t, len(dt) > 0)
+
+ _, err = os.ReadFile(filepath.Join(destDir, "out2"))
+ require.Error(t, err)
+ require.True(t, errors.Is(err, os.ErrNotExist))
+}
+
+func testNamedOCILayoutContext(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureOCILayout)
+ // how this test works:
+ // 1- we use a regular builder with a dockerfile to create an image with two files: "out" with content "first", "out2" with content "second"
+ // 2- we save the output to an OCI layout dir
+ // 3- we use another regular builder with a dockerfile to build using a referenced context "base", but override it to reference the output of the previous build
+ // 4- we check that the output of the second build matches our OCI layout, and not the referenced image
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ // create a tempdir where we will store the OCI layout
+ ocidir := t.TempDir()
+
+ ociDockerfile := []byte(`
+ FROM busybox:latest
+ WORKDIR /test
+ RUN sh -c "echo -n first > out"
+ RUN sh -c "echo -n second > out2"
+ ENV foo=bar
+ `)
+ inDir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", ociDockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ outW := bytes.NewBuffer(nil)
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: inDir,
+ builder.DefaultLocalNameContext: inDir,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterOCI,
+ Output: fixedWriteCloser(nopWriteCloser{outW}),
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ // extract the tar stream to the directory as OCI layout
+ m, err := testutil.ReadTarToMap(outW.Bytes(), false)
+ require.NoError(t, err)
+
+ for filename, content := range m {
+ fullFilename := path.Join(ocidir, filename)
+ err = os.MkdirAll(path.Dir(fullFilename), 0755)
+ require.NoError(t, err)
+ if content.Header.FileInfo().IsDir() {
+ err = os.MkdirAll(fullFilename, 0755)
+ require.NoError(t, err)
+ } else {
+ err = os.WriteFile(fullFilename, content.Data, 0644)
+ require.NoError(t, err)
+ }
+ }
+
+ var index ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &index)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(index.Manifests))
+ digest := index.Manifests[0].Digest.Hex()
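+ // this digest is used below to reference the image as oci-layout:<store-id>@sha256:<digest>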
+
+ store, err := local.NewStore(ocidir)
+ ociID := "ocione"
+ require.NoError(t, err)
+
+ // we will use this simple dockerfile to test
+ // 1. busybox is used as is, but because we override the context for base,
+ // when we run `COPY --from=base`, it should take the /o* from the image in the store,
+ // rather than what we built on the first 2 lines here.
+ // 2. we override the context for `foo` to be our local OCI store, which has an `ENV foo=bar` override.
+ // As such, the `RUN echo $foo` step should have `$foo` set to `"bar"`, and so
+ // when we `COPY --from=imported`, it should have the content of `/outfoo` as `"bar"`
+ dockerfile := []byte(`
+FROM busybox AS base
+RUN cat /etc/alpine-release > out
+
+FROM foo AS imported
+RUN echo -n $foo > outfoo
+
+FROM scratch
+COPY --from=base /test/o* /
+COPY --from=imported /test/outfoo /
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ destDir := t.TempDir()
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "context:base": fmt.Sprintf("oci-layout:%s@sha256:%s", ociID, digest),
+ "context:foo": fmt.Sprintf("oci-layout:%s@sha256:%s", ociID, digest),
+ },
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ OCIStores: map[string]content.Store{
+ ociID: store,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
+ require.NoError(t, err)
+ require.True(t, len(dt) > 0)
+ require.Equal(t, []byte("first"), dt)
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "out2"))
+ require.NoError(t, err)
+ require.True(t, len(dt) > 0)
+ require.Equal(t, []byte("second"), dt)
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "outfoo"))
+ require.NoError(t, err)
+ require.True(t, len(dt) > 0)
+ require.Equal(t, []byte("bar"), dt)
+}
+
+func testNamedOCILayoutContextExport(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureOCILayout)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ ocidir := t.TempDir()
+
+ dockerfile := []byte(`
+FROM scratch
+WORKDIR /test
+ENV foo=bar
+ `)
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ outW := bytes.NewBuffer(nil)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ Exports: []client.ExportEntry{{
+ Type: client.ExporterOCI,
+ Output: fixedWriteCloser(nopWriteCloser{outW}),
+ }},
+ }, nil)
+ require.NoError(t, err)
+
+ m, err := testutil.ReadTarToMap(outW.Bytes(), false)
+ require.NoError(t, err)
+
+ for filename, content := range m {
+ fullFilename := path.Join(ocidir, filename)
+ err = os.MkdirAll(path.Dir(fullFilename), 0755)
+ require.NoError(t, err)
+ if content.Header.FileInfo().IsDir() {
+ err = os.MkdirAll(fullFilename, 0755)
+ require.NoError(t, err)
+ } else {
+ err = os.WriteFile(fullFilename, content.Data, 0644)
+ require.NoError(t, err)
+ }
+ }
+
+ var index ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &index)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(index.Manifests))
+ digest := index.Manifests[0].Digest.Hex()
+
+ store, err := local.NewStore(ocidir)
+ ociID := "ocione"
+ require.NoError(t, err)
+
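+	// Build a Dockerfile whose base image "nonexistent" cannot be resolved on its own;
+	// the named context below points it at the OCI layout exported above, so the
+	// exported image config should inherit that image's WORKDIR and ENV settings.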
+ dockerfile = []byte(`
+FROM nonexistent AS base
+`)
+
+ dir, err = integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ outW = bytes.NewBuffer(nil)
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "context:nonexistent": fmt.Sprintf("oci-layout:%s@sha256:%s", ociID, digest),
+ },
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ OCIStores: map[string]content.Store{
+ ociID: store,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterOCI,
+ Output: fixedWriteCloser(nopWriteCloser{outW}),
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ m, err = testutil.ReadTarToMap(outW.Bytes(), false)
+ require.NoError(t, err)
+
+ err = json.Unmarshal(m["index.json"].Data, &index)
+ require.NoError(t, err)
+ require.Equal(t, 1, len(index.Manifests))
+ digest = index.Manifests[0].Digest.Hex()
+
+ var mfst ocispecs.Manifest
+ require.NoError(t, json.Unmarshal(m["blobs/sha256/"+digest].Data, &mfst))
+ digest = mfst.Config.Digest.Hex()
+
+ var cfg ocispecs.Image
+ require.NoError(t, json.Unmarshal(m["blobs/sha256/"+digest].Data, &cfg))
+
+ require.Equal(t, "/test", cfg.Config.WorkingDir)
+ require.Contains(t, cfg.Config.Env, "foo=bar")
+}
+
+func testNamedInputContext(t *testing.T, sb integration.Sandbox) {
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ dockerfile := []byte(`
+FROM alpine
+ENV FOO=bar
+RUN echo first > /out
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ dockerfile2 := []byte(`
+FROM base AS build
+RUN echo "foo is $FOO" > /foo
+FROM scratch
+COPY --from=build /foo /out /
+`)
+
+ dir2, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile2, 0600),
+ )
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{})
+ if err != nil {
+ return nil, err
+ }
+ ref, err := res.SingleRef()
+ if err != nil {
+ return nil, err
+ }
+ st, err := ref.ToState()
+ if err != nil {
+ return nil, err
+ }
+
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ dt, ok := res.Metadata["containerimage.config"]
+ if !ok {
+ return nil, errors.Errorf("no containerimage.config in metadata")
+ }
+
+ dt, err = json.Marshal(map[string][]byte{
+ "containerimage.config": dt,
+ })
+ if err != nil {
+ return nil, err
+ }
+
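+	// Solve the second Dockerfile, wiring the first build's result in as the named
+	// context "base": its LLB definition is passed via FrontendInputs and its image
+	// config via the input-metadata frontend option.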
+ res, err = f.SolveGateway(ctx, c, gateway.SolveRequest{
+ FrontendOpt: map[string]string{
+ "dockerfilekey": builder.DefaultLocalNameDockerfile + "2",
+ "context:base": "input:base",
+ "input-metadata:base": string(dt),
+ },
+ FrontendInputs: map[string]*pb.Definition{
+ "base": def.ToPB(),
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+ }
+
+ product := "buildkit_test"
+
+ destDir := t.TempDir()
+
+ _, err = c.Build(ctx, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ builder.DefaultLocalNameDockerfile + "2": dir2,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, product, b, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "out"))
+ require.NoError(t, err)
+ require.Equal(t, "first\n", string(dt))
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "foo"))
+ require.NoError(t, err)
+ require.Equal(t, "foo is bar\n", string(dt))
+}
+
+func testNamedMultiplatformInputContext(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureMultiPlatform)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ dockerfile := []byte(`
+FROM --platform=$BUILDPLATFORM alpine
+ARG TARGETARCH
+ENV FOO=bar-$TARGETARCH
+RUN echo "foo $TARGETARCH" > /out
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ dockerfile2 := []byte(`
+FROM base AS build
+RUN echo "foo is $FOO" > /foo
+FROM scratch
+COPY --from=build /foo /out /
+`)
+
+ dir2, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile2, 0600),
+ )
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ b := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
+ res, err := f.SolveGateway(ctx, c, gateway.SolveRequest{
+ FrontendOpt: map[string]string{
+ "platform": "linux/amd64,linux/arm64",
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ if len(res.Refs) != 2 {
+ return nil, errors.Errorf("expected 2 refs, got %d", len(res.Refs))
+ }
+
+ inputs := map[string]*pb.Definition{}
+ st, err := res.Refs["linux/amd64"].ToState()
+ if err != nil {
+ return nil, err
+ }
+ def, err := st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ inputs["base::linux/amd64"] = def.ToPB()
+
+ st, err = res.Refs["linux/arm64"].ToState()
+ if err != nil {
+ return nil, err
+ }
+ def, err = st.Marshal(ctx)
+ if err != nil {
+ return nil, err
+ }
+ inputs["base::linux/arm64"] = def.ToPB()
+
+ frontendOpt := map[string]string{
+ "dockerfilekey": builder.DefaultLocalNameDockerfile + "2",
+ "context:base::linux/amd64": "input:base::linux/amd64",
+ "context:base::linux/arm64": "input:base::linux/arm64",
+ "platform": "linux/amd64,linux/arm64",
+ }
+
+ dt, ok := res.Metadata["containerimage.config/linux/amd64"]
+ if !ok {
+ return nil, errors.Errorf("no containerimage.config in metadata")
+ }
+ dt, err = json.Marshal(map[string][]byte{
+ "containerimage.config": dt,
+ })
+ if err != nil {
+ return nil, err
+ }
+ frontendOpt["input-metadata:base::linux/amd64"] = string(dt)
+
+ dt, ok = res.Metadata["containerimage.config/linux/arm64"]
+ if !ok {
+ return nil, errors.Errorf("no containerimage.config in metadata")
+ }
+ dt, err = json.Marshal(map[string][]byte{
+ "containerimage.config": dt,
+ })
+ if err != nil {
+ return nil, err
+ }
+ frontendOpt["input-metadata:base::linux/arm64"] = string(dt)
+
+ res, err = f.SolveGateway(ctx, c, gateway.SolveRequest{
+ FrontendOpt: frontendOpt,
+ FrontendInputs: inputs,
+ })
+ if err != nil {
return nil, err
}
+ return res, nil
+ }
+
+ product := "buildkit_test"
+
+ destDir := t.TempDir()
+
+ _, err = c.Build(ctx, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ builder.DefaultLocalNameDockerfile + "2": dir2,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterLocal,
+ OutputDir: destDir,
+ },
+ },
+ }, product, b, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "linux_amd64/out"))
+ require.NoError(t, err)
+ require.Equal(t, "foo amd64\n", string(dt))
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "linux_amd64/foo"))
+ require.NoError(t, err)
+ require.Equal(t, "foo is bar-amd64\n", string(dt))
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "linux_arm64/out"))
+ require.NoError(t, err)
+ require.Equal(t, "foo arm64\n", string(dt))
+
+ dt, err = os.ReadFile(filepath.Join(destDir, "linux_arm64/foo"))
+ require.NoError(t, err)
+ require.Equal(t, "foo is bar-arm64\n", string(dt))
+}
+
+func testSourceDateEpochWithoutExporter(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureOCIExporter, integration.FeatureSourceDateEpoch)
+ f := getFrontend(t, sb)
+
+ dockerfile := []byte(`
+FROM scratch
+ENTRYPOINT foo bar
+COPY Dockerfile .
+`)
+
+ dir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+ defer os.RemoveAll(dir)
+
+ c, err := client.New(sb.Context(), sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ destDir, err := os.MkdirTemp("", "buildkit")
+ require.NoError(t, err)
+ defer os.RemoveAll(destDir)
+
+ out := filepath.Join(destDir, "out.tar")
+ outW, err := os.Create(out)
+ require.NoError(t, err)
+
+ tm := time.Date(2015, time.October, 21, 7, 28, 0, 0, time.UTC)
+
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ FrontendAttrs: map[string]string{
+ "build-arg:SOURCE_DATE_EPOCH": fmt.Sprintf("%d", tm.Unix()),
+ },
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: dir,
+ builder.DefaultLocalNameContext: dir,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterOCI,
+ // disable exporter epoch to make sure we test dockerfile
+ Attrs: map[string]string{"source-date-epoch": ""},
+ Output: fixedWriteCloser(outW),
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dt, err := os.ReadFile(filepath.Join(destDir, "out.tar"))
+ require.NoError(t, err)
+
+ m, err := testutil.ReadTarToMap(dt, false)
+ require.NoError(t, err)
+
+ var idx ocispecs.Index
+ err = json.Unmarshal(m["index.json"].Data, &idx)
+ require.NoError(t, err)
+
+ mlistHex := idx.Manifests[0].Digest.Hex()
+
+ var mfst ocispecs.Manifest
+ err = json.Unmarshal(m["blobs/sha256/"+mlistHex].Data, &mfst)
+ require.NoError(t, err)
+
+ var img ocispecs.Image
+ err = json.Unmarshal(m["blobs/sha256/"+mfst.Config.Digest.Hex()].Data, &img)
+ require.NoError(t, err)
+
+ require.Equal(t, tm.Unix(), img.Created.Unix())
+ for _, h := range img.History {
+ require.Equal(t, tm.Unix(), h.Created.Unix())
+ }
+}
+
+func testSBOMScannerImage(t *testing.T, sb integration.Sandbox) {
+ integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM)
+ ctx := sb.Context()
+
+ c, err := client.New(ctx, sb.Address())
+ require.NoError(t, err)
+ defer c.Close()
+
+ registry, err := sb.NewRegistry()
+ if errors.Is(err, integration.ErrRequirements) {
+ t.Skip(err.Error())
+ }
+ require.NoError(t, err)
+
+ f := getFrontend(t, sb)
+
+ dockerfile := []byte(`
+FROM busybox:latest
+COPY <<-"EOF" /scan.sh
+ set -e
+	cat <<BUNDLE > $BUILDKIT_SCAN_DESTINATION/spdx.json
+ {
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://spdx.dev/Document",
+ "predicate": {"name": "sbom-scan"}
+ }
+ BUNDLE
+EOF
+CMD sh /scan.sh
+`)
+ scannerDir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ scannerTarget := registry + "/buildkit/testsbomscanner:latest"
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: scannerDir,
+ builder.DefaultLocalNameContext: scannerDir,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": scannerTarget,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ dockerfile = []byte(`
+FROM scratch
+COPY <<EOF /foo
+	cat <<BUNDLE > $BUILDKIT_SCAN_DESTINATION/spdx.json
+ {
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://spdx.dev/Document",
+ "predicate": {"name": "core"}
+ }
+ BUNDLE
+ if [ "${BUILDKIT_SCAN_SOURCE_EXTRAS}" ]; then
+ for src in "${BUILDKIT_SCAN_SOURCE_EXTRAS}"/*; do
+	cat <<BUNDLE > $BUILDKIT_SCAN_DESTINATION/$(basename $src).spdx.json
+ {
+ "_type": "https://in-toto.io/Statement/v0.1",
+ "predicateType": "https://spdx.dev/Document",
+ "predicate": {"name": "extra"}
+ }
+ BUNDLE
+ done
+ fi
+EOF
+CMD sh /scan.sh
+`)
+
+ scannerDir, err := integration.Tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+ require.NoError(t, err)
+
+ scannerTarget := registry + "/buildkit/testsbomscannerargs:latest"
+ _, err = f.Solve(sb.Context(), c, client.SolveOpt{
+ LocalDirs: map[string]string{
+ builder.DefaultLocalNameDockerfile: scannerDir,
+ builder.DefaultLocalNameContext: scannerDir,
+ },
+ Exports: []client.ExportEntry{
+ {
+ Type: client.ExporterImage,
+ Attrs: map[string]string{
+ "name": scannerTarget,
+ "push": "true",
+ },
+ },
+ },
+ }, nil)
+ require.NoError(t, err)
+
+ // scan an image with no additional sboms
+ dockerfile = []byte(`
+FROM scratch as base
+COPY <<EOF /foo
+> **Note on whitespace**
+>
+> For backward compatibility, leading whitespace before comments (`#`) and
+> instructions (such as `RUN`) is ignored, but discouraged. Leading whitespace
+> is not preserved in these cases, and the following examples are therefore
+> equivalent:
+>
+> ```dockerfile
+>         # this is a comment-line
+>     RUN echo hello
+> RUN echo world
+> ```
+>
+> ```dockerfile
+> # this is a comment-line
+> RUN echo hello
+> RUN echo world
+> ```
+>
+> Note however, that whitespace in instruction _arguments_, such as the commands
+> following `RUN`, is preserved, so the following example prints `    hello    world`
+> with leading whitespace as specified:
+>
+> ```dockerfile
+> RUN echo "\
+>        hello\
+>        world"
+> ```
+
+## Parser directives
+
+Parser directives are optional, and affect the way in which subsequent lines
+in a `Dockerfile` are handled. Parser directives do not add layers to the build,
+and will not be shown as a build step. Parser directives are written as a
+special type of comment in the form `# directive=value`. A single directive
+may only be used once.
+
+Once a comment, empty line or builder instruction has been processed, Docker
+no longer looks for parser directives. Instead it treats anything formatted
+as a parser directive as a comment and does not attempt to validate if it might
+be a parser directive. Therefore, all parser directives must be at the very
+top of a `Dockerfile`.
+
+Parser directives are not case-sensitive. However, convention is for them to
+be lowercase. Convention is also to include a blank line following any
+parser directives. Line continuation characters are not supported in parser
+directives.
+
+Due to these rules, the following examples are all invalid:
+
+Invalid due to line continuation:
+
+```dockerfile
+# direc \
+tive=value
+```
+
+Invalid due to appearing twice:
+
+```dockerfile
+# directive=value1
+# directive=value2
+
+FROM ImageName
+```
+
+Treated as a comment due to appearing after a builder instruction:
+
+```dockerfile
+FROM ImageName
+# directive=value
+```
+
+Treated as a comment due to appearing after a comment which is not a parser
+directive:
+
+```dockerfile
+# About my dockerfile
+# directive=value
+FROM ImageName
+```
+
+The unknown directive is treated as a comment due to not being recognized. In
+addition, the known directive is treated as a comment due to appearing after
+a comment which is not a parser directive.
+
+```dockerfile
+# unknowndirective=value
+# knowndirective=value
+```
+
+Non line-breaking whitespace is permitted in a parser directive. Hence, the
+following lines are all treated identically:
+
+```dockerfile
+#directive=value
+# directive =value
+# directive= value
+# directive = value
+# dIrEcTiVe=value
+```
+
+The following parser directives are supported:
+
+- `syntax`
+- `escape`
+
+### syntax
+
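+For example, to build with the latest stable release of the official
+`docker/dockerfile` frontend, declare the directive on the first line of the
+`Dockerfile`:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+```
+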
+This feature is only available when using the [BuildKit](https://docs.docker.com/build/buildkit/)
+backend, and is ignored when using the classic builder backend.
+
+See [Custom Dockerfile syntax](https://docs.docker.com/build/buildkit/dockerfile-frontend/)
+page for more information.
+
+### escape
+
+```dockerfile
+# escape=\ (backslash)
+```
+
+Or
+
+```dockerfile
+# escape=` (backtick)
+```
+
+The `escape` directive sets the character used to escape characters in a
+`Dockerfile`. If not specified, the default escape character is `\`.
+
+The escape character is used both to escape characters in a line, and to
+escape a newline. This allows a `Dockerfile` instruction to
+span multiple lines. Note that regardless of whether the `escape` parser
+directive is included in a `Dockerfile`, *escaping is not performed in
+a `RUN` command, except at the end of a line.*
+
+Setting the escape character to `` ` `` is especially useful on
+`Windows`, where `\` is the directory path separator. `` ` `` is consistent
+with [Windows PowerShell](https://technet.microsoft.com/en-us/library/hh847755.aspx).
+
+Consider the following example which would fail in a non-obvious way on
+`Windows`. The second `\` at the end of the second line would be interpreted as an
+escape for the newline, instead of a target of the escape from the first `\`.
+Similarly, the `\` at the end of the third line would, assuming it was actually
+handled as an instruction, cause it to be treated as a line continuation. The result
+of this Dockerfile is that the second and third lines are considered a single
+instruction:
+
+```dockerfile
+FROM microsoft/nanoserver
+COPY testfile.txt c:\\
+RUN dir c:\
+```
+
+Results in:
+
+```console
+PS E:\myproject> docker build -t cmd .
+
+Sending build context to Docker daemon 3.072 kB
+Step 1/2 : FROM microsoft/nanoserver
+ ---> 22738ff49c6d
+Step 2/2 : COPY testfile.txt c:\RUN dir c:
+GetFileAttributesEx c:RUN: The system cannot find the file specified.
+PS E:\myproject>
+```
+
+One solution to the above would be to use `/` as the target of both the `COPY`
+instruction, and `dir`. However, this syntax is, at best, confusing as it is not
+natural for paths on `Windows`, and at worst, error prone as not all commands on
+`Windows` support `/` as the path separator.
+
+By adding the `escape` parser directive, the following `Dockerfile` succeeds as
+expected with the use of natural platform semantics for file paths on `Windows`:
+
+```dockerfile
+# escape=`
+
+FROM microsoft/nanoserver
+COPY testfile.txt c:\
+RUN dir c:\
+```
+
+Results in:
+
+```console
+PS E:\myproject> docker build -t succeeds --no-cache=true .
+
+Sending build context to Docker daemon 3.072 kB
+Step 1/3 : FROM microsoft/nanoserver
+ ---> 22738ff49c6d
+Step 2/3 : COPY testfile.txt c:\
+ ---> 96655de338de
+Removing intermediate container 4db9acbb1682
+Step 3/3 : RUN dir c:\
+ ---> Running in a2c157f842f5
+ Volume in drive C has no label.
+ Volume Serial Number is 7E6D-E0F7
+
+ Directory of c:\
+
+10/05/2016 05:04 PM 1,894 License.txt
+10/05/2016 02:22 PM Program Files
+10/05/2016 02:14 PM Program Files (x86)
+10/28/2016 11:18 AM 62 testfile.txt
+10/28/2016 11:20 AM Users
+10/28/2016 11:20 AM Windows
+ 2 File(s) 1,956 bytes
+ 4 Dir(s) 21,259,096,064 bytes free
+ ---> 01c7f3bef04f
+Removing intermediate container a2c157f842f5
+Successfully built 01c7f3bef04f
+PS E:\myproject>
+```
+
+## Environment replacement
+
+Environment variables (declared with [the `ENV` statement](#env)) can also be
+used in certain instructions as variables to be interpreted by the
+`Dockerfile`. Escapes are also handled for including variable-like syntax
+into a statement literally.
+
+Environment variables are notated in the `Dockerfile` either with
+`$variable_name` or `${variable_name}`. They are treated equivalently and the
+brace syntax is typically used to address issues with variable names with no
+whitespace, like `${foo}_bar`.
+
+The `${variable_name}` syntax also supports a few of the standard `bash`
+modifiers as specified below:
+
+- `${variable:-word}` indicates that if `variable` is set then the result
+ will be that value. If `variable` is not set then `word` will be the result.
+- `${variable:+word}` indicates that if `variable` is set then `word` will be
+ the result, otherwise the result is the empty string.
+
+In all cases, `word` can be any string, including additional environment
+variables.
+
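+As an illustration (the `APP_HOME` build argument is an arbitrary name), a
+fallback value can be supplied for a variable that may not be set:
+
+```dockerfile
+FROM busybox
+ARG APP_HOME
+WORKDIR ${APP_HOME:-/app}
+```
+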
+Escaping is possible by adding a `\` before the variable: `\$foo` or `\${foo}`,
+for example, will translate to `$foo` and `${foo}` literals respectively.
+
+Example (parsed representation is displayed after the `#`):
+
+```dockerfile
+FROM busybox
+ENV FOO=/bar
+WORKDIR ${FOO} # WORKDIR /bar
+ADD . $FOO # ADD . /bar
+COPY \$FOO /quux # COPY $FOO /quux
+```
+
+Environment variables are supported by the following list of instructions in
+the `Dockerfile`:
+
+- `ADD`
+- `COPY`
+- `ENV`
+- `EXPOSE`
+- `FROM`
+- `LABEL`
+- `STOPSIGNAL`
+- `USER`
+- `VOLUME`
+- `WORKDIR`
+- `ONBUILD` (when combined with one of the supported instructions above)
+
+Environment variable substitution will use the same value for each variable
+throughout the entire instruction. In other words, in this example:
+
+```dockerfile
+ENV abc=hello
+ENV abc=bye def=$abc
+ENV ghi=$abc
+```
+
+will result in `def` having a value of `hello`, not `bye`. However,
+`ghi` will have a value of `bye` because it is not part of the same instruction
+that set `abc` to `bye`.
+
+## .dockerignore file
+
+Before the docker CLI sends the context to the docker daemon, it looks
+for a file named `.dockerignore` in the root directory of the context.
+If this file exists, the CLI modifies the context to exclude files and
+directories that match patterns in it. This helps to avoid
+unnecessarily sending large or sensitive files and directories to the
+daemon and potentially adding them to images using `ADD` or `COPY`.
+
+The CLI interprets the `.dockerignore` file as a newline-separated
+list of patterns similar to the file globs of Unix shells. For the
+purposes of matching, the root of the context is considered to be both
+the working and the root directory. For example, the patterns
+`/foo/bar` and `foo/bar` both exclude a file or directory named `bar`
+in the `foo` subdirectory of `PATH` or in the root of the git
+repository located at `URL`. Neither excludes anything else.
+
+If a line in the `.dockerignore` file starts with `#` in column 1, then this line is
+considered a comment and is ignored before being interpreted by the CLI.
+
+Here is an example `.dockerignore` file:
+
+```gitignore
+# comment
+*/temp*
+*/*/temp*
+temp?
+```
+
+This file causes the following build behavior:
+
+| Rule | Behavior |
+|:------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `# comment` | Ignored. |
+| `*/temp*` | Exclude files and directories whose names start with `temp` in any immediate subdirectory of the root. For example, the plain file `/somedir/temporary.txt` is excluded, as is the directory `/somedir/temp`. |
+| `*/*/temp*` | Exclude files and directories starting with `temp` from any subdirectory that is two levels below the root. For example, `/somedir/subdir/temporary.txt` is excluded. |
+| `temp?` | Exclude files and directories in the root directory whose names are a one-character extension of `temp`. For example, `/tempa` and `/tempb` are excluded. |
+
+
+Matching is done using Go's
+[filepath.Match](https://golang.org/pkg/path/filepath#Match) rules. A
+preprocessing step removes leading and trailing whitespace and
+eliminates `.` and `..` elements using Go's
+[filepath.Clean](https://golang.org/pkg/path/filepath/#Clean). Lines
+that are blank after preprocessing are ignored.
+
+Beyond Go's filepath.Match rules, Docker also supports a special
+wildcard string `**` that matches any number of directories (including
+zero). For example, `**/*.go` will exclude all files that end with `.go`
+that are found in all directories, including the root of the build context.
+
+Lines starting with `!` (exclamation mark) can be used to make exceptions
+to exclusions. The following is an example `.dockerignore` file that
+uses this mechanism:
+
+```gitignore
+*.md
+!README.md
+```
+
+All markdown files *except* `README.md` are excluded from the context.
+
+The placement of `!` exception rules influences the behavior: the last
+line of the `.dockerignore` that matches a particular file determines
+whether it is included or excluded. Consider the following example:
+
+```gitignore
+*.md
+!README*.md
+README-secret.md
+```
+
+No markdown files are included in the context except README files other than
+`README-secret.md`.
+
+Now consider this example:
+
+```gitignore
+*.md
+README-secret.md
+!README*.md
+```
+
+All of the README files are included. The middle line has no effect because
+`!README*.md` matches `README-secret.md` and comes last.
+
+You can even use the `.dockerignore` file to exclude the `Dockerfile`
+and `.dockerignore` files. These files are still sent to the daemon
+because it needs them to do its job. But the `ADD` and `COPY` instructions
+do not copy them to the image.
+
+Finally, you may want to specify which files to include in the
+context, rather than which to exclude. To achieve this, specify `*` as
+the first pattern, followed by one or more `!` exception patterns.
+
+> **Note**
+>
+> For historical reasons, the pattern `.` is ignored.
+
+## FROM
+
+```dockerfile
+FROM [--platform=<platform>] <image> [AS <name>]
+```
+
+Or
+
+```dockerfile
+FROM [--platform=<platform>] <image>[:<tag>] [AS <name>]
+```
+
+Or
+
+```dockerfile
+FROM [--platform=<platform>] <image>[@<digest>] [AS <name>]
+```
+
+The `FROM` instruction initializes a new build stage and sets the
+[*Base Image*](https://docs.docker.com/glossary/#base-image) for subsequent instructions. As such, a
+valid `Dockerfile` must start with a `FROM` instruction. The image can be
+any valid image – it is especially easy to start by **pulling an image** from
+the [*Public Repositories*](https://docs.docker.com/docker-hub/repos/).
+
+- `ARG` is the only instruction that may precede `FROM` in the `Dockerfile`.
+ See [Understand how ARG and FROM interact](#understand-how-arg-and-from-interact).
+- `FROM` can appear multiple times within a single `Dockerfile` to
+ create multiple images or use one build stage as a dependency for another.
+ Simply make a note of the last image ID output by the commit before each new
+ `FROM` instruction. Each `FROM` instruction clears any state created by previous
+ instructions.
+- Optionally a name can be given to a new build stage by adding `AS name` to the
+ `FROM` instruction. The name can be used in subsequent `FROM` and
+  `COPY --from=<name>` instructions to refer to the image built in this stage.
+- The `tag` or `digest` values are optional. If you omit either of them, the
+ builder assumes a `latest` tag by default. The builder returns an error if it
+ cannot find the `tag` value.
+
+The optional `--platform` flag can be used to specify the platform of the image
+in case `FROM` references a multi-platform image. For example, `linux/amd64`,
+`linux/arm64`, or `windows/amd64`. By default, the target platform of the build
+request is used. Global build arguments can be used in the value of this flag,
+for example [automatic platform ARGs](#automatic-platform-args-in-the-global-scope)
+allow you to force a stage to the native build platform (`--platform=$BUILDPLATFORM`),
+and use it to cross-compile to the target platform inside the stage.
+
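+The following is a sketch of that cross-compilation pattern; the `golang` image and
+the `go build` flags are only illustrative, and any toolchain that can target other
+platforms can be used in the same way:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM --platform=$BUILDPLATFORM golang AS build
+ARG TARGETOS
+ARG TARGETARCH
+WORKDIR /src
+COPY . .
+RUN GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o /out/app .
+
+FROM scratch
+COPY --from=build /out/app /app
+```
+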
+### Understand how ARG and FROM interact
+
+`FROM` instructions support variables that are declared by any `ARG`
+instructions that occur before the first `FROM`.
+
+```dockerfile
+ARG CODE_VERSION=latest
+FROM base:${CODE_VERSION}
+CMD /code/run-app
+
+FROM extras:${CODE_VERSION}
+CMD /code/run-extras
+```
+
+An `ARG` declared before a `FROM` is outside of a build stage, so it
+can't be used in any instruction after a `FROM`. To use the default value of
+an `ARG` declared before the first `FROM` use an `ARG` instruction without
+a value inside of a build stage:
+
+```dockerfile
+ARG VERSION=latest
+FROM busybox:$VERSION
+ARG VERSION
+RUN echo $VERSION > image_version
+```
+
+## RUN
+
+RUN has 2 forms:
+
+- `RUN <command>` (*shell* form, the command is run in a shell, which by
+default is `/bin/sh -c` on Linux or `cmd /S /C` on Windows)
+- `RUN ["executable", "param1", "param2"]` (*exec* form)
+
+The `RUN` instruction will execute any commands in a new layer on top of the
+current image and commit the results. The resulting committed image will be
+used for the next step in the `Dockerfile`.
+
+Layering `RUN` instructions and generating commits conforms to the core
+concepts of Docker where commits are cheap and containers can be created from
+any point in an image's history, much like source control.
+
+The *exec* form makes it possible to avoid shell string munging, and to `RUN`
+commands using a base image that does not contain the specified shell executable.
+
+The default shell for the *shell* form can be changed using the `SHELL`
+command.
+
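+For example, a short sketch switching the shell used by subsequent shell-form
+instructions (the Windows base image and PowerShell shell are only illustrative):
+
+```dockerfile
+FROM microsoft/nanoserver
+SHELL ["powershell", "-command"]
+RUN Write-Host hello
+```
+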
+In the *shell* form you can use a `\` (backslash) to continue a single
+RUN instruction onto the next line. For example, consider these two lines:
+
+```dockerfile
+RUN /bin/bash -c 'source $HOME/.bashrc; \
+echo $HOME'
+```
+
+Together they are equivalent to this single line:
+
+```dockerfile
+RUN /bin/bash -c 'source $HOME/.bashrc; echo $HOME'
+```
+
+To use a shell other than `/bin/sh`, use the *exec* form, passing in
+the desired shell. For example:
+
+```dockerfile
+RUN ["/bin/bash", "-c", "echo hello"]
+```
+
+> **Note**
+>
+> The *exec* form is parsed as a JSON array, which means that
+> you must use double-quotes (") around words not single-quotes (').
+
+Unlike the *shell* form, the *exec* form does not invoke a command shell.
+This means that normal shell processing does not happen. For example,
+`RUN [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
+If you want shell processing then either use the *shell* form or execute
+a shell directly, for example: `RUN [ "sh", "-c", "echo $HOME" ]`.
+When using the exec form and executing a shell directly, as in the case for
+the shell form, it is the shell that is doing the environment variable
+expansion, not docker.
+
+> **Note**
+>
+> In the *JSON* form, it is necessary to escape backslashes. This is
+> particularly relevant on Windows where the backslash is the path separator.
+> The following line would otherwise be treated as *shell* form due to not
+> being valid JSON, and fail in an unexpected way:
+>
+> ```dockerfile
+> RUN ["c:\windows\system32\tasklist.exe"]
+> ```
+>
+> The correct syntax for this example is:
+>
+> ```dockerfile
+> RUN ["c:\\windows\\system32\\tasklist.exe"]
+> ```
+
+The cache for `RUN` instructions isn't invalidated automatically during
+the next build. The cache for an instruction like
+`RUN apt-get dist-upgrade -y` will be reused during the next build. The
+cache for `RUN` instructions can be invalidated by using the `--no-cache`
+flag, for example `docker build --no-cache`.
+
+See the [`Dockerfile` Best Practices
+guide](https://docs.docker.com/engine/userguide/eng-image/dockerfile_best-practices/) for more information.
+
+The cache for `RUN` instructions can be invalidated by [`ADD`](#add) and [`COPY`](#copy) instructions.
+
+### Known issues (RUN)
+
+- [Issue 783](https://github.com/docker/docker/issues/783) is about file
+ permissions problems that can occur when using the AUFS file system. You
+ might notice it during an attempt to `rm` a file, for example.
+
+ For systems that have recent aufs version (i.e., `dirperm1` mount option can
+ be set), docker will attempt to fix the issue automatically by mounting
+ the layers with `dirperm1` option. More details on `dirperm1` option can be
+ found at [`aufs` man page](https://github.com/sfjro/aufs3-linux/tree/aufs3.18/Documentation/filesystems/aufs)
+
+ If your system doesn't have support for `dirperm1`, the issue describes a workaround.
+
+## RUN --mount
+
+> **Note**
+>
+> Added in [`docker/dockerfile:1.2`](#syntax)
+
+`RUN --mount` allows you to create filesystem mounts that the build can access.
+This can be used to:
+
+- Create bind mount to the host filesystem or other build stages
+- Access build secrets or ssh-agent sockets
+- Use a persistent package management cache to speed up your build
+
+Syntax: `--mount=[type=<TYPE>][,option=<value>[,option=<value>]...]`
+
+### Mount types
+
+| Type | Description |
+|------------------------------------------|-----------------------------------------------------------------------------------------------------------|
+| [`bind`](#run---mounttypebind) (default) | Bind-mount context directories (read-only). |
+| [`cache`](#run---mounttypecache)         | Mount a temporary directory to cache directories for compilers and package managers.                       |
+| [`tmpfs`](#run---mounttypetmpfs)         | Mount a tmpfs in the build container.                                                                       |
+| [`secret`](#run---mounttypesecret) | Allow the build container to access secure files such as private keys without baking them into the image. |
+| [`ssh`](#run---mounttypessh) | Allow the build container to access SSH keys via SSH agents, with support for passphrases. |
+
+### RUN --mount=type=bind
+
+This mount type allows binding files or directories to the build container. A
+bind mount is read-only by default.
+
+| Option | Description |
+|----------------------|--------------------------------------------------------------------------------------|
+| `target`[^1] | Mount path. |
+| `source` | Source path in the `from`. Defaults to the root of the `from`. |
+| `from` | Build stage or image name for the root of the source. Defaults to the build context. |
+| `rw`,`readwrite` | Allow writes on the mount. Written data will be discarded. |
+
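+For example, the build context can be mounted into a step instead of being copied
+into the image. The snippet below is a minimal sketch; `/src` is just an arbitrary
+mount path:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM alpine
+RUN --mount=type=bind,target=/src \
+    ls -la /src
+```
+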
+### RUN --mount=type=cache
+
+This mount type allows the build container to cache directories for compilers
+and package managers.
+
+| Option | Description |
+|---------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `id` | Optional ID to identify separate/different caches. Defaults to value of `target`. |
+| `target`[^1] | Mount path. |
+| `ro`,`readonly` | Read-only if set. |
+| `sharing` | One of `shared`, `private`, or `locked`. Defaults to `shared`. A `shared` cache mount can be used concurrently by multiple writers. `private` creates a new mount if there are multiple writers. `locked` pauses the second writer until the first one releases the mount. |
+| `from` | Build stage to use as a base of the cache mount. Defaults to empty directory. |
+| `source` | Subpath in the `from` to mount. Defaults to the root of the `from`. |
+| `mode` | File mode for new cache directory in octal. Default `0755`. |
+| `uid` | User ID for new cache directory. Default `0`. |
+| `gid` | Group ID for new cache directory. Default `0`. |
+
+The contents of the cache directories persist between builder invocations without
+invalidating the instruction cache. Cache mounts should only be used for better
+performance. Your build should work with any contents of the cache directory as
+another build may overwrite the files or GC may clean it if more storage space
+is needed.
+
+#### Example: cache Go packages
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM golang
+RUN --mount=type=cache,target=/root/.cache/go-build \
+ go build ...
+```
+
+#### Example: cache apt packages
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM ubuntu
+RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
+RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+ --mount=type=cache,target=/var/lib/apt,sharing=locked \
+ apt update && apt-get --no-install-recommends install -y gcc
+```
+
+Apt needs exclusive access to its data, so the caches use the option
+`sharing=locked`, which will make sure multiple parallel builds using
+the same cache mount will wait for each other and not access the same
+cache files at the same time. You could also use `sharing=private` if
+you prefer to have each build create another cache directory in this
+case.
+
+### RUN --mount=type=tmpfs
+
+This mount type allows mounting tmpfs in the build container.
+
+| Option | Description |
+|---------------------|-------------------------------------------------------|
+| `target`[^1] | Mount path. |
+| `size` | Specify an upper limit on the size of the filesystem. |
+
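+A minimal sketch; the `dd` command is only a stand-in for a step that writes
+temporary files, and nothing written under the mount path is committed to the
+resulting layer:
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM ubuntu
+RUN --mount=type=tmpfs,target=/tmp \
+    dd if=/dev/zero of=/tmp/scratch bs=1M count=16
+```
+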
+### RUN --mount=type=secret
+
+This mount type allows the build container to access secure files such as
+private keys without baking them into the image.
+
+| Option | Description |
+|---------------------|---------------------------------------------------------------------------------------------------|
+| `id` | ID of the secret. Defaults to basename of the target path. |
+| `target` | Mount path. Defaults to `/run/secrets/` + `id`. |
+| `required` | If set to `true`, the instruction errors out when the secret is unavailable. Defaults to `false`. |
+| `mode` | File mode for secret file in octal. Default `0400`. |
+| `uid` | User ID for secret file. Default `0`. |
+| `gid` | Group ID for secret file. Default `0`. |
+
+#### Example: access to S3
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM python:3
+RUN pip install awscli
+RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
+ aws s3 cp s3://... ...
+```
+
+```console
+$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials .
+```
+
+### RUN --mount=type=ssh
+
+This mount type allows the build container to access SSH keys via SSH agents,
+with support for passphrases.
+
+| Option | Description |
+|---------------------|------------------------------------------------------------------------------------------------|
+| `id` | ID of SSH agent socket or key. Defaults to "default". |
+| `target` | SSH agent socket path. Defaults to `/run/buildkit/ssh_agent.${N}`. |
+| `required` | If set to `true`, the instruction errors out when the key is unavailable. Defaults to `false`. |
+| `mode` | File mode for socket in octal. Default `0600`. |
+| `uid` | User ID for socket. Default `0`. |
+| `gid` | Group ID for socket. Default `0`. |
+
+#### Example: access to Gitlab
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM alpine
+RUN apk add --no-cache openssh-client
+RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
+RUN --mount=type=ssh \
+ ssh -q -T git@gitlab.com 2>&1 | tee /hello
+# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here
+# with the type of build progress is defined as `plain`.
+```
+
+```console
+$ eval $(ssh-agent)
+$ ssh-add ~/.ssh/id_rsa
+(Input your passphrase here)
+$ docker buildx build --ssh default=$SSH_AUTH_SOCK .
+```
+
+You can also specify a path to `*.pem` file on the host directly instead of `$SSH_AUTH_SOCK`.
+However, pem files with passphrases are not supported.
+
+## RUN --network
+
+> **Note**
+>
+> Added in [`docker/dockerfile:1.1`](#syntax)
+
+`RUN --network` allows control over which networking environment the command
+is run in.
+
+Syntax: `--network=<TYPE>`
+
+### Network types
+
+| Type | Description |
+|----------------------------------------------|----------------------------------------|
+| [`default`](#run---networkdefault) (default) | Run in the default network. |
+| [`none`](#run---networknone) | Run with no network access. |
+| [`host`](#run---networkhost) | Run in the host's network environment. |
+
+### RUN --network=default
+
+Equivalent to not supplying a flag at all, the command is run in the default
+network for the build.
+
+### RUN --network=none
+
+The command is run with no network access (`lo` is still available, but is
+isolated to this process)
+
+#### Example: isolating external effects
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM python:3.6
+ADD mypackage.tgz wheels/
+RUN --network=none pip install --find-links wheels mypackage
+```
+
+`pip` will only be able to install the packages provided in the tarfile, which
+can be controlled by an earlier build stage.
+
+### RUN --network=host
+
+The command is run in the host's network environment (similar to
+`docker build --network=host`, but on a per-instruction basis)
+
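+As a sketch, assuming a service or package proxy is already listening on the
+host's loopback interface (the URL below is hypothetical):
+
+```dockerfile
+# syntax=docker/dockerfile:1
+FROM alpine
+RUN --network=host wget -qO- http://127.0.0.1:8080/healthz
+```
+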
+> **Warning**
+>
+> The use of `--network=host` is protected by the `network.host` entitlement,
+> which needs to be enabled when starting the buildkitd daemon with
+> `--allow-insecure-entitlement network.host` flag or in [buildkitd config](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
+> and for a build request with [`--allow network.host` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/#allow).
+{:.warning}
+
+## RUN --security
+
+> **Note**
+>
+> Not yet available in stable syntax, use [`docker/dockerfile:1-labs`](#syntax) version.
+
+### RUN --security=insecure
+
+With `--security=insecure`, the builder runs the command without a sandbox in insecure
+mode, which allows running flows that require elevated privileges (e.g. containerd).
+This is equivalent to running `docker run --privileged`.
+
+> **Warning**
+>
+> In order to access this feature, entitlement `security.insecure` should be
+> enabled when starting the buildkitd daemon with
+> `--allow-insecure-entitlement security.insecure` flag or in [buildkitd config](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
+> and for a build request with [`--allow security.insecure` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/#allow).
+{:.warning}
+
+#### Example: check entitlements
+
+```dockerfile
+# syntax=docker/dockerfile:1-labs
+FROM ubuntu
+RUN --security=insecure cat /proc/self/status | grep CapEff
+```
+```text
+#84 0.093 CapEff: 0000003fffffffff
+```
+
+### RUN --security=sandbox
+
+The default sandbox mode can be activated via `--security=sandbox`, but that is a no-op.
+
+## CMD
+
+The `CMD` instruction has three forms:
+
+- `CMD ["executable","param1","param2"]` (*exec* form, this is the preferred form)
+- `CMD ["param1","param2"]` (as *default parameters to ENTRYPOINT*)
+- `CMD command param1 param2` (*shell* form)
+
+There can only be one `CMD` instruction in a `Dockerfile`. If you list more than one `CMD`
+then only the last `CMD` will take effect.
+
+**The main purpose of a `CMD` is to provide defaults for an executing
+container.** These defaults can include an executable, or they can omit
+the executable, in which case you must specify an `ENTRYPOINT`
+instruction as well.
+
+If `CMD` is used to provide default arguments for the `ENTRYPOINT` instruction,
+both the `CMD` and `ENTRYPOINT` instructions should be specified with the JSON
+array format.
+
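+For example, with the following Dockerfile the image pings `localhost` by default,
+while `docker run <image> example.com` replaces only the `CMD` arguments and keeps
+the `ENTRYPOINT`:
+
+```dockerfile
+FROM busybox
+ENTRYPOINT ["ping", "-c", "3"]
+CMD ["localhost"]
+```
+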
+> **Note**
+>
+> The *exec* form is parsed as a JSON array, which means that you must use
+> double-quotes (") around words not single-quotes (').
+
+Unlike the *shell* form, the *exec* form does not invoke a command shell.
+This means that normal shell processing does not happen. For example,
+`CMD [ "echo", "$HOME" ]` will not do variable substitution on `$HOME`.
+If you want shell processing then either use the *shell* form or execute
+a shell directly, for example: `CMD [ "sh", "-c", "echo $HOME" ]`.
+When using the exec form and executing a shell directly, as in the case for
+the shell form, it is the shell that is doing the environment variable
+expansion, not docker.
+
+When used in the shell or exec formats, the `CMD` instruction sets the command
+to be executed when running the image.
+
+If you use the *shell* form of the `CMD`, then the `<command>` will execute in
+`/bin/sh -c`:
+
+```dockerfile
+FROM ubuntu
+CMD echo "This is a test." | wc -
+```
+
+If you want to **run your** `