diff --git a/.ci-operator.yaml b/.ci-operator.yaml index 7cd14bfef5..7096df34e3 100644 --- a/.ci-operator.yaml +++ b/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-8-release-golang-1.20-openshift-4.15 + tag: rhel-9-release-golang-1.20-openshift-4.15 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..d3ba3129c1 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,21 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-merge-conflict + - id: check-yaml + - id: trailing-whitespace + args: + - --markdown-linebreak-ext=md + - repo: local + hooks: + - id: cpo-containerfiles-in-sync + name: cpo-containerfiles-in-sync + entry: ./hack/tools/git-hooks/cpo-containerfiles-in-sync.sh + language: script + pass_filenames: false + args: + - Containerfile.control-plane + - Dockerfile.control-plane + description: Ensures the CPO container files stay in sync +exclude: '^vendor/|^hack/tools/vendor/|^api/vendor/' diff --git a/.snyk b/.snyk new file mode 100644 index 0000000000..769e512049 --- /dev/null +++ b/.snyk @@ -0,0 +1,7 @@ +# References: +# https://docs.snyk.io/scan-applications/snyk-code/using-snyk-code-from-the-cli/excluding-directories-and-files-from-the-snyk-code-cli-test +# https://docs.snyk.io/snyk-cli/commands/ignore +exclude: + global: + - 'vendor/**' + - 'control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go' diff --git a/.tekton/hypershift-operator-main-pull-request.yaml b/.tekton/hypershift-release-mce-25-pull-request.yaml similarity index 51% rename from .tekton/hypershift-operator-main-pull-request.yaml rename to .tekton/hypershift-release-mce-25-pull-request.yaml index d3c8816f21..2f001dfe09 100644 --- a/.tekton/hypershift-operator-main-pull-request.yaml +++ b/.tekton/hypershift-release-mce-25-pull-request.yaml @@ -1,4 +1,4 @@ -apiVersion: tekton.dev/v1beta1 +apiVersion: 
tekton.dev/v1 kind: PipelineRun metadata: annotations: @@ -7,29 +7,29 @@ metadata: build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "3" - pipelinesascode.tekton.dev/on-event: '[pull_request]' - pipelinesascode.tekton.dev/on-target-branch: '[main]' + pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch + == "release-4.15" creationTimestamp: null labels: - appstudio.openshift.io/application: hypershift-operator - appstudio.openshift.io/component: hypershift-operator-main + appstudio.openshift.io/application: release-mce-25 + appstudio.openshift.io/component: hypershift-release-mce-25 pipelines.appstudio.openshift.io/type: build - name: hypershift-operator-main-on-pull-request + name: hypershift-release-mce-25-on-pull-request namespace: crt-redhat-acm-tenant spec: params: - - name: dockerfile - value: Containerfile.operator - name: git-url - value: '{{repo_url}}' + value: '{{source_url}}' + - name: revision + value: '{{revision}}' + - name: output-image + value: quay.io/redhat-user-workloads/crt-redhat-acm-tenant/release-mce-25/hypershift-release-mce-25:on-pr-{{revision}} - name: image-expires-after value: 5d - - name: output-image - value: quay.io/redhat-user-workloads/crt-redhat-acm-tenant/hypershift-operator/hypershift-operator-main:on-pr-{{revision}} + - name: dockerfile + value: Containerfile.operator - name: path-context value: . 
- - name: revision - value: '{{revision}}' pipelineSpec: finally: - name: show-sbom @@ -37,8 +37,14 @@ spec: - name: IMAGE_URL value: $(tasks.build-container.results.IMAGE_URL) taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-show-sbom:0.1@sha256:4b8a47b3cd3f80d0a9ec46ec43fd2ebe0487979f351a8eeebe5aa04854d3861c - name: show-sbom + params: + - name: name + value: show-sbom + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:945a7c9066d3e0a95d3fddb7e8a6992e4d632a2a75d8f3a9bd2ff2fef0ec9aa0 + - name: kind + value: task + resolver: bundles - name: show-summary params: - name: pipelinerun-name @@ -50,8 +56,17 @@ spec: - name: build-task-status value: $(tasks.build-container.status) taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-summary:0.1@sha256:6e9a01b2694b8008192bc469e42335ac9392835599cc0b8f82ce11bb1a7be177 - name: summary + params: + - name: name + value: summary + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-summary:0.2@sha256:870d9a04d9784840a90b7bf6817cd0d0c4edfcda04b1ba1868cae625a3c3bfcc + - name: kind + value: task + resolver: bundles + workspaces: + - name: workspace + workspace: workspace params: - description: Source Repository URL name: git-url @@ -64,11 +79,13 @@ spec: name: output-image type: string - default: . - description: The path to your source code + description: Path to the source code of an application's component from where + to build image. 
name: path-context type: string - default: Dockerfile - description: Path to the Dockerfile + description: Path to the Dockerfile inside the context specified by parameter + path-context name: dockerfile type: string - default: "false" @@ -79,10 +96,6 @@ spec: description: Skip checks against built image name: skip-checks type: string - - default: "true" - description: Skip optional checks, set false if you want to run optional checks - name: skip-optional - type: string - default: "false" description: Execute the build with network isolation name: hermetic @@ -95,14 +108,22 @@ spec: description: Java build name: java type: string - - default: "snyk-secret" - description: Snyk Token Secret Name - name: snyk-secret - type: string - default: "" description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively. name: image-expires-after + - default: "false" + description: Build a source image. + name: build-source-image + type: string + - default: [] + description: Array of --build-arg values ("arg=value" strings) for buildah + name: build-args + type: array + - default: "" + description: Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file + name: build-args-file + type: string results: - description: "" name: IMAGE_URL @@ -128,15 +149,15 @@ spec: value: $(params.rebuild) - name: skip-checks value: $(params.skip-checks) - - name: skip-optional - value: $(params.skip-optional) - - name: pipelinerun-name - value: $(context.pipelineRun.name) - - name: pipelinerun-uid - value: $(context.pipelineRun.uid) taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-init:0.1@sha256:159b85246559defbabbd55a42da0b7f618a4307d13bd4d6eb486efb81d1dcfb5 - name: init + params: + - name: name + value: init + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:60063fefe88e111d129cb59caff97c912722927c8a0f750253553d4c527a2396 + - name: kind 
+ value: task + resolver: bundles - name: clone-repository params: - name: url @@ -146,8 +167,14 @@ spec: runAfter: - init taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-git-clone:0.1@sha256:913cdc904919482689e79829daeaa3b4d4cc116aafefd135d5af1fc2f8f1afcd - name: git-clone + params: + - name: name + value: git-clone + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-git-clone:0.1@sha256:d091a9e19567a4cbdc5acd57903c71ba71dc51d749a4ba7477e689608851e981 + - name: kind + value: task + resolver: bundles when: - input: $(tasks.init.results.build) operator: in @@ -165,16 +192,26 @@ spec: runAfter: - clone-repository taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-prefetch-dependencies:0.1@sha256:563b91f72adf9b3b93f990f7c1d140f79749af837135aa07212347cb9ed79a34 - name: prefetch-dependencies + params: + - name: name + value: prefetch-dependencies + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies:0.1@sha256:53fc6d82b06534878e509f3e37f05b818f38fba01729dd1fbee6f97a9562c1ed + - name: kind + value: task + resolver: bundles when: - - input: $(params.hermetic) - operator: in + - input: $(params.prefetch-input) + operator: notin values: - - "true" + - "" workspaces: - name: source workspace: workspace + - name: git-basic-auth + workspace: git-auth + - name: netrc + workspace: netrc - name: build-container params: - name: IMAGE @@ -183,19 +220,30 @@ spec: value: $(params.dockerfile) - name: CONTEXT value: $(params.path-context) - - name: DOCKER_AUTH - value: $(tasks.init.results.container-registry-secret) - name: HERMETIC value: $(params.hermetic) - name: PREFETCH_INPUT value: $(params.prefetch-input) - name: IMAGE_EXPIRES_AFTER value: $(params.image-expires-after) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: BUILD_ARGS + value: + - $(params.build-args[*]) + - name: BUILD_ARGS_FILE + value: $(params.build-args-file) runAfter: - prefetch-dependencies taskRef: - 
bundle: quay.io/redhat-appstudio-tekton-catalog/task-buildah:0.1@sha256:93bb409637d51b37332b768ffa911f27f805229fdf4ba0841f745f83efceefa2 - name: buildah + params: + - name: name + value: buildah + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-buildah:0.2@sha256:a43226b3d578d4ca3e77c7662a4834f1b071ed870323432fd0c4c48049caeda5 + - name: kind + value: task + resolver: bundles when: - input: $(tasks.init.results.build) operator: in @@ -204,83 +252,92 @@ spec: workspaces: - name: source workspace: workspace - - name: inspect-image + - name: build-source-image params: - - name: IMAGE_URL - value: $(tasks.build-container.results.IMAGE_URL) - - name: IMAGE_DIGEST - value: $(tasks.build-container.results.IMAGE_DIGEST) - - name: DOCKER_AUTH - value: $(tasks.init.results.container-registry-secret) + - name: BINARY_IMAGE + value: $(params.output-image) runAfter: - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-inspect-image:0.1@sha256:3a5d3f611240eb5b7b12799c2be22a71803df80dbc12cce2e1e2a252ab543423 - name: inspect-image + params: + - name: name + value: source-build + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-source-build:0.1@sha256:bacd55a3caa34a30bcf51c00f3f719cb3f783e325257f04c27a91f688cbe9644 + - name: kind + value: task + resolver: bundles when: - - input: $(params.skip-checks) + - input: $(tasks.init.results.build) operator: in values: - - "false" - workspaces: - - name: source - workspace: workspace - - name: label-check - runAfter: - - inspect-image - taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-label-check:0.1@sha256:0c0739fdda24cd1e3587bbab9b07d4493efc21884baac7723f4b446e95bf1fd3 - name: label-check - when: - - input: $(params.skip-checks) + - "true" + - input: $(params.build-source-image) operator: in values: - - "false" + - "true" workspaces: - name: workspace workspace: workspace - - name: optional-label-check + - name: deprecated-base-image-check params: - - name: 
POLICY_NAMESPACE - value: optional_checks + - name: IMAGE_URL + value: $(tasks.build-container.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-container.results.IMAGE_DIGEST) runAfter: - - inspect-image + - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-label-check:0.1@sha256:0c0739fdda24cd1e3587bbab9b07d4493efc21884baac7723f4b446e95bf1fd3 - name: label-check + params: + - name: name + value: deprecated-image-check + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.4@sha256:f8efb0b22692fad908a1a75f8d5c0b6ed3b0bcd2a9853577e7be275e5bac1bb8 + - name: kind + value: task + resolver: bundles when: - - input: $(params.skip-optional) + - input: $(params.skip-checks) operator: in values: - "false" - workspaces: - - name: workspace - workspace: workspace - - name: deprecated-base-image-check + - name: clair-scan params: - - name: BASE_IMAGES_DIGESTS - value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS) + - name: image-digest + value: $(tasks.build-container.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-container.results.IMAGE_URL) + runAfter: + - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-deprecated-image-check:0.3@sha256:6d7d51064b03e3bebaeeef48018694ee2a0bb2dd94783efea89eb87861516f92 - name: deprecated-image-check + params: + - name: name + value: clair-scan + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:e428b37d253621365ffb24d4053e5f3141988ae6a30fce1c8ba73b7211396eb0 + - name: kind + value: task + resolver: bundles when: - input: $(params.skip-checks) operator: in values: - "false" - - name: clair-scan + - name: ecosystem-cert-preflight-checks params: - - name: image-digest - value: $(tasks.build-container.results.IMAGE_DIGEST) - name: image-url value: $(tasks.build-container.results.IMAGE_URL) - - name: docker-auth - value: $(tasks.init.results.container-registry-secret) runAfter: 
- build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-clair-scan:0.1@sha256:3691a67eae1e098959fb18ce415dbc489baf214b2bfa8321f95e096decb87538 - name: clair-scan + params: + - name: name + value: ecosystem-cert-preflight-checks + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.1@sha256:df8a25a3431a70544172ed4844f9d0c6229d39130633960729f825a031a7dea9 + - name: kind + value: task + resolver: bundles when: - input: $(params.skip-checks) operator: in @@ -288,22 +345,26 @@ spec: - "false" - name: sast-snyk-check params: - - name: SNYK_SECRET - value: $(params.snyk-secret) + - name: image-digest + value: $(tasks.build-container.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-container.results.IMAGE_URL) runAfter: - - clone-repository + - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-sast-snyk-check:0.1@sha256:d42d958aaf50c604281665f1deb26858d877ba7cc0171d0d3f937f07909c9e05 - name: sast-snyk-check + params: + - name: name + value: sast-snyk-check + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check:0.3@sha256:df33774c66799947d012605febdc75e82498413ecc6e374774c648006b816cd1 + - name: kind + value: task + resolver: bundles when: - input: $(params.skip-checks) operator: in values: - "false" - - input: $(params.snyk-secret) - operator: notin - values: - - "" workspaces: - name: workspace workspace: workspace @@ -313,38 +374,68 @@ spec: value: $(tasks.build-container.results.IMAGE_DIGEST) - name: image-url value: $(tasks.build-container.results.IMAGE_URL) - - name: docker-auth - value: $(tasks.init.results.container-registry-secret) runAfter: - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-clamav-scan:0.1@sha256:1779f85108dcc55837ecb103b6524242872056a04ccdd726bbf1e718dcb8dc30 - name: clamav-scan + params: + - name: name + value: clamav-scan + - name: bundle + value: 
quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:d78221853f7ff2befc6669dd0eeb91e6611ae84ac7754150ea0f071d92ff41cb + - name: kind + value: task + resolver: bundles when: - input: $(params.skip-checks) operator: in values: - "false" - - name: sbom-json-check + - name: apply-tags params: - - name: IMAGE_URL + - name: IMAGE + value: $(tasks.build-container.results.IMAGE_URL) + runAfter: + - build-container + taskRef: + params: + - name: name + value: apply-tags + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.1@sha256:0767c115d4ba4854d106c9cdfabdc1f1298bc2742a3fea4fefbac4b9c5873d6e + - name: kind + value: task + resolver: bundles + - name: push-dockerfile + params: + - name: IMAGE value: $(tasks.build-container.results.IMAGE_URL) - name: IMAGE_DIGEST value: $(tasks.build-container.results.IMAGE_DIGEST) + - name: DOCKERFILE + value: $(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) runAfter: - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-sbom-json-check:0.1@sha256:ea881dfd625f9afa5d32ac01ca936132003aa33dd273b97388a14a84dd818f60 - name: sbom-json-check - when: - - input: $(params.skip-checks) - operator: in - values: - - "false" + params: + - name: name + value: push-dockerfile + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile:0.1@sha256:48bb2ee92ea528b28c0814c9cc126021e499a081b69431987a774561e9ac8047 + - name: kind + value: task + resolver: bundles + workspaces: + - name: workspace + workspace: workspace workspaces: - name: workspace - name: git-auth optional: true + - name: netrc + optional: true + taskRunTemplate: {} workspaces: - name: workspace volumeClaimTemplate: diff --git a/.tekton/hypershift-operator-main-push.yaml b/.tekton/hypershift-release-mce-25-push.yaml similarity index 51% rename from .tekton/hypershift-operator-main-push.yaml rename to .tekton/hypershift-release-mce-25-push.yaml index 3e76f0f20e..5cb6b88c56 100644 --- 
a/.tekton/hypershift-operator-main-push.yaml +++ b/.tekton/hypershift-release-mce-25-push.yaml @@ -1,4 +1,4 @@ -apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: PipelineRun metadata: annotations: @@ -6,27 +6,27 @@ metadata: build.appstudio.redhat.com/commit_sha: '{{revision}}' build.appstudio.redhat.com/target_branch: '{{target_branch}}' pipelinesascode.tekton.dev/max-keep-runs: "3" - pipelinesascode.tekton.dev/on-event: '[push]' - pipelinesascode.tekton.dev/on-target-branch: '[main]' + pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch + == "release-4.15" creationTimestamp: null labels: - appstudio.openshift.io/application: hypershift-operator - appstudio.openshift.io/component: hypershift-operator-main + appstudio.openshift.io/application: release-mce-25 + appstudio.openshift.io/component: hypershift-release-mce-25 pipelines.appstudio.openshift.io/type: build - name: hypershift-operator-main-on-push + name: hypershift-release-mce-25-on-push namespace: crt-redhat-acm-tenant spec: params: - - name: dockerfile - value: Containerfile.operator - name: git-url - value: '{{repo_url}}' + value: '{{source_url}}' + - name: revision + value: '{{revision}}' - name: output-image - value: quay.io/redhat-user-workloads/crt-redhat-acm-tenant/hypershift-operator/hypershift-operator-main:{{revision}} + value: quay.io/redhat-user-workloads/crt-redhat-acm-tenant/release-mce-25/hypershift-release-mce-25:{{revision}} + - name: dockerfile + value: Containerfile.operator - name: path-context value: . 
- - name: revision - value: '{{revision}}' pipelineSpec: finally: - name: show-sbom @@ -34,8 +34,14 @@ spec: - name: IMAGE_URL value: $(tasks.build-container.results.IMAGE_URL) taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-show-sbom:0.1@sha256:4b8a47b3cd3f80d0a9ec46ec43fd2ebe0487979f351a8eeebe5aa04854d3861c - name: show-sbom + params: + - name: name + value: show-sbom + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-show-sbom:0.1@sha256:945a7c9066d3e0a95d3fddb7e8a6992e4d632a2a75d8f3a9bd2ff2fef0ec9aa0 + - name: kind + value: task + resolver: bundles - name: show-summary params: - name: pipelinerun-name @@ -47,8 +53,17 @@ spec: - name: build-task-status value: $(tasks.build-container.status) taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-summary:0.1@sha256:6e9a01b2694b8008192bc469e42335ac9392835599cc0b8f82ce11bb1a7be177 - name: summary + params: + - name: name + value: summary + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-summary:0.2@sha256:870d9a04d9784840a90b7bf6817cd0d0c4edfcda04b1ba1868cae625a3c3bfcc + - name: kind + value: task + resolver: bundles + workspaces: + - name: workspace + workspace: workspace params: - description: Source Repository URL name: git-url @@ -61,11 +76,13 @@ spec: name: output-image type: string - default: . - description: The path to your source code + description: Path to the source code of an application's component from where + to build image. 
name: path-context type: string - default: Dockerfile - description: Path to the Dockerfile + description: Path to the Dockerfile inside the context specified by parameter + path-context name: dockerfile type: string - default: "false" @@ -76,10 +93,6 @@ spec: description: Skip checks against built image name: skip-checks type: string - - default: "true" - description: Skip optional checks, set false if you want to run optional checks - name: skip-optional - type: string - default: "false" description: Execute the build with network isolation name: hermetic @@ -92,14 +105,22 @@ spec: description: Java build name: java type: string - - default: "snyk-secret" - description: Snyk Token Secret Name - name: snyk-secret - type: string - default: "" description: Image tag expiration time, time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively. name: image-expires-after + - default: "false" + description: Build a source image. + name: build-source-image + type: string + - default: [] + description: Array of --build-arg values ("arg=value" strings) for buildah + name: build-args + type: array + - default: "" + description: Path to a file with build arguments for buildah, see https://www.mankier.com/1/buildah-build#--build-arg-file + name: build-args-file + type: string results: - description: "" name: IMAGE_URL @@ -125,15 +146,15 @@ spec: value: $(params.rebuild) - name: skip-checks value: $(params.skip-checks) - - name: skip-optional - value: $(params.skip-optional) - - name: pipelinerun-name - value: $(context.pipelineRun.name) - - name: pipelinerun-uid - value: $(context.pipelineRun.uid) taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-init:0.1@sha256:159b85246559defbabbd55a42da0b7f618a4307d13bd4d6eb486efb81d1dcfb5 - name: init + params: + - name: name + value: init + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-init:0.2@sha256:60063fefe88e111d129cb59caff97c912722927c8a0f750253553d4c527a2396 + - name: kind 
+ value: task + resolver: bundles - name: clone-repository params: - name: url @@ -143,8 +164,14 @@ spec: runAfter: - init taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-git-clone:0.1@sha256:913cdc904919482689e79829daeaa3b4d4cc116aafefd135d5af1fc2f8f1afcd - name: git-clone + params: + - name: name + value: git-clone + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-git-clone:0.1@sha256:d091a9e19567a4cbdc5acd57903c71ba71dc51d749a4ba7477e689608851e981 + - name: kind + value: task + resolver: bundles when: - input: $(tasks.init.results.build) operator: in @@ -162,16 +189,26 @@ spec: runAfter: - clone-repository taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-prefetch-dependencies:0.1@sha256:563b91f72adf9b3b93f990f7c1d140f79749af837135aa07212347cb9ed79a34 - name: prefetch-dependencies + params: + - name: name + value: prefetch-dependencies + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-prefetch-dependencies:0.1@sha256:53fc6d82b06534878e509f3e37f05b818f38fba01729dd1fbee6f97a9562c1ed + - name: kind + value: task + resolver: bundles when: - - input: $(params.hermetic) - operator: in + - input: $(params.prefetch-input) + operator: notin values: - - "true" + - "" workspaces: - name: source workspace: workspace + - name: git-basic-auth + workspace: git-auth + - name: netrc + workspace: netrc - name: build-container params: - name: IMAGE @@ -180,19 +217,30 @@ spec: value: $(params.dockerfile) - name: CONTEXT value: $(params.path-context) - - name: DOCKER_AUTH - value: $(tasks.init.results.container-registry-secret) - name: HERMETIC value: $(params.hermetic) - name: PREFETCH_INPUT value: $(params.prefetch-input) - name: IMAGE_EXPIRES_AFTER value: $(params.image-expires-after) + - name: COMMIT_SHA + value: $(tasks.clone-repository.results.commit) + - name: BUILD_ARGS + value: + - $(params.build-args[*]) + - name: BUILD_ARGS_FILE + value: $(params.build-args-file) runAfter: - prefetch-dependencies taskRef: - 
bundle: quay.io/redhat-appstudio-tekton-catalog/task-buildah:0.1@sha256:93bb409637d51b37332b768ffa911f27f805229fdf4ba0841f745f83efceefa2 - name: buildah + params: + - name: name + value: buildah + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-buildah:0.2@sha256:a43226b3d578d4ca3e77c7662a4834f1b071ed870323432fd0c4c48049caeda5 + - name: kind + value: task + resolver: bundles when: - input: $(tasks.init.results.build) operator: in @@ -201,83 +249,92 @@ spec: workspaces: - name: source workspace: workspace - - name: inspect-image + - name: build-source-image params: - - name: IMAGE_URL - value: $(tasks.build-container.results.IMAGE_URL) - - name: IMAGE_DIGEST - value: $(tasks.build-container.results.IMAGE_DIGEST) - - name: DOCKER_AUTH - value: $(tasks.init.results.container-registry-secret) + - name: BINARY_IMAGE + value: $(params.output-image) runAfter: - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-inspect-image:0.1@sha256:3a5d3f611240eb5b7b12799c2be22a71803df80dbc12cce2e1e2a252ab543423 - name: inspect-image + params: + - name: name + value: source-build + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-source-build:0.1@sha256:bacd55a3caa34a30bcf51c00f3f719cb3f783e325257f04c27a91f688cbe9644 + - name: kind + value: task + resolver: bundles when: - - input: $(params.skip-checks) + - input: $(tasks.init.results.build) operator: in values: - - "false" - workspaces: - - name: source - workspace: workspace - - name: label-check - runAfter: - - inspect-image - taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-label-check:0.1@sha256:0c0739fdda24cd1e3587bbab9b07d4493efc21884baac7723f4b446e95bf1fd3 - name: label-check - when: - - input: $(params.skip-checks) + - "true" + - input: $(params.build-source-image) operator: in values: - - "false" + - "true" workspaces: - name: workspace workspace: workspace - - name: optional-label-check + - name: deprecated-base-image-check params: - - name: 
POLICY_NAMESPACE - value: optional_checks + - name: IMAGE_URL + value: $(tasks.build-container.results.IMAGE_URL) + - name: IMAGE_DIGEST + value: $(tasks.build-container.results.IMAGE_DIGEST) runAfter: - - inspect-image + - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-label-check:0.1@sha256:0c0739fdda24cd1e3587bbab9b07d4493efc21884baac7723f4b446e95bf1fd3 - name: label-check + params: + - name: name + value: deprecated-image-check + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-deprecated-image-check:0.4@sha256:f8efb0b22692fad908a1a75f8d5c0b6ed3b0bcd2a9853577e7be275e5bac1bb8 + - name: kind + value: task + resolver: bundles when: - - input: $(params.skip-optional) + - input: $(params.skip-checks) operator: in values: - "false" - workspaces: - - name: workspace - workspace: workspace - - name: deprecated-base-image-check + - name: clair-scan params: - - name: BASE_IMAGES_DIGESTS - value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS) + - name: image-digest + value: $(tasks.build-container.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-container.results.IMAGE_URL) + runAfter: + - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-deprecated-image-check:0.3@sha256:6d7d51064b03e3bebaeeef48018694ee2a0bb2dd94783efea89eb87861516f92 - name: deprecated-image-check + params: + - name: name + value: clair-scan + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-clair-scan:0.2@sha256:e428b37d253621365ffb24d4053e5f3141988ae6a30fce1c8ba73b7211396eb0 + - name: kind + value: task + resolver: bundles when: - input: $(params.skip-checks) operator: in values: - "false" - - name: clair-scan + - name: ecosystem-cert-preflight-checks params: - - name: image-digest - value: $(tasks.build-container.results.IMAGE_DIGEST) - name: image-url value: $(tasks.build-container.results.IMAGE_URL) - - name: docker-auth - value: $(tasks.init.results.container-registry-secret) runAfter: 
- build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-clair-scan:0.1@sha256:3691a67eae1e098959fb18ce415dbc489baf214b2bfa8321f95e096decb87538 - name: clair-scan + params: + - name: name + value: ecosystem-cert-preflight-checks + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-ecosystem-cert-preflight-checks:0.1@sha256:df8a25a3431a70544172ed4844f9d0c6229d39130633960729f825a031a7dea9 + - name: kind + value: task + resolver: bundles when: - input: $(params.skip-checks) operator: in @@ -285,22 +342,26 @@ spec: - "false" - name: sast-snyk-check params: - - name: SNYK_SECRET - value: $(params.snyk-secret) + - name: image-digest + value: $(tasks.build-container.results.IMAGE_DIGEST) + - name: image-url + value: $(tasks.build-container.results.IMAGE_URL) runAfter: - - clone-repository + - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-sast-snyk-check:0.1@sha256:d42d958aaf50c604281665f1deb26858d877ba7cc0171d0d3f937f07909c9e05 - name: sast-snyk-check + params: + - name: name + value: sast-snyk-check + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-sast-snyk-check:0.3@sha256:df33774c66799947d012605febdc75e82498413ecc6e374774c648006b816cd1 + - name: kind + value: task + resolver: bundles when: - input: $(params.skip-checks) operator: in values: - "false" - - input: $(params.snyk-secret) - operator: notin - values: - - "" workspaces: - name: workspace workspace: workspace @@ -310,38 +371,68 @@ spec: value: $(tasks.build-container.results.IMAGE_DIGEST) - name: image-url value: $(tasks.build-container.results.IMAGE_URL) - - name: docker-auth - value: $(tasks.init.results.container-registry-secret) runAfter: - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-clamav-scan:0.1@sha256:1779f85108dcc55837ecb103b6524242872056a04ccdd726bbf1e718dcb8dc30 - name: clamav-scan + params: + - name: name + value: clamav-scan + - name: bundle + value: 
quay.io/konflux-ci/tekton-catalog/task-clamav-scan:0.2@sha256:d78221853f7ff2befc6669dd0eeb91e6611ae84ac7754150ea0f071d92ff41cb + - name: kind + value: task + resolver: bundles when: - input: $(params.skip-checks) operator: in values: - "false" - - name: sbom-json-check + - name: apply-tags params: - - name: IMAGE_URL + - name: IMAGE + value: $(tasks.build-container.results.IMAGE_URL) + runAfter: + - build-container + taskRef: + params: + - name: name + value: apply-tags + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-apply-tags:0.1@sha256:0767c115d4ba4854d106c9cdfabdc1f1298bc2742a3fea4fefbac4b9c5873d6e + - name: kind + value: task + resolver: bundles + - name: push-dockerfile + params: + - name: IMAGE value: $(tasks.build-container.results.IMAGE_URL) - name: IMAGE_DIGEST value: $(tasks.build-container.results.IMAGE_DIGEST) + - name: DOCKERFILE + value: $(params.dockerfile) + - name: CONTEXT + value: $(params.path-context) runAfter: - build-container taskRef: - bundle: quay.io/redhat-appstudio-tekton-catalog/task-sbom-json-check:0.1@sha256:ea881dfd625f9afa5d32ac01ca936132003aa33dd273b97388a14a84dd818f60 - name: sbom-json-check - when: - - input: $(params.skip-checks) - operator: in - values: - - "false" + params: + - name: name + value: push-dockerfile + - name: bundle + value: quay.io/konflux-ci/tekton-catalog/task-push-dockerfile:0.1@sha256:48bb2ee92ea528b28c0814c9cc126021e499a081b69431987a774561e9ac8047 + - name: kind + value: task + resolver: bundles + workspaces: + - name: workspace + workspace: workspace workspaces: - name: workspace - name: git-auth optional: true + - name: netrc + optional: true + taskRunTemplate: {} workspaces: - name: workspace volumeClaimTemplate: diff --git a/Containerfile.control-plane b/Containerfile.control-plane new file mode 100644 index 0000000000..b7c9b7a978 --- /dev/null +++ b/Containerfile.control-plane @@ -0,0 +1,27 @@ +FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_9_1.22 AS builder + 
+WORKDIR /hypershift + +COPY . . + +RUN make control-plane-operator control-plane-pki-operator + +FROM registry.redhat.io/rhel9-2-els/rhel:9.2 +COPY --from=builder /hypershift/bin/control-plane-operator /usr/bin/control-plane-operator +COPY --from=builder /hypershift/bin/control-plane-pki-operator /usr/bin/control-plane-pki-operator + +ENTRYPOINT /usr/bin/control-plane-operator + +LABEL io.openshift.release.operator=true +LABEL io.openshift.hypershift.control-plane-operator-subcommands=true +LABEL io.openshift.hypershift.control-plane-operator-skips-haproxy=true +LABEL io.openshift.hypershift.ignition-server-healthz-handler=true +LABEL io.openshift.hypershift.control-plane-operator-manages-ignition-server=true +LABEL io.openshift.hypershift.control-plane-operator-manages.cluster-machine-approver=true +LABEL io.openshift.hypershift.control-plane-operator-manages.cluster-autoscaler=true +LABEL io.openshift.hypershift.control-plane-operator-manages.decompress-decode-config=true +LABEL io.openshift.hypershift.control-plane-operator-creates-aws-sg=true +LABEL io.openshift.hypershift.control-plane-operator-applies-management-kas-network-policy-label=true +LABEL io.openshift.hypershift.restricted-psa=true +LABEL io.openshift.hypershift.control-plane-pki-operator-signs-csrs=true +LABEL io.openshift.hypershift.hosted-cluster-config-operator-reports-node-count=true diff --git a/Containerfile.operator b/Containerfile.operator index 41e584bb22..13aae5a618 100644 --- a/Containerfile.operator +++ b/Containerfile.operator @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi9/go-toolset:1.20 as builder +FROM registry.access.redhat.com/ubi9/go-toolset:9.5 as builder COPY . . @@ -14,7 +14,7 @@ RUN chmod g+w . 
&& \ git config --global --add safe.directory "$PWD" && \ make build -FROM registry.access.redhat.com/ubi9-minimal:9.2 +FROM registry.access.redhat.com/ubi9-minimal:9.5-1731604394 COPY --from=builder \ /opt/app-root/src/bin/hypershift \ /opt/app-root/src/bin/hcp \ @@ -50,3 +50,4 @@ LABEL io.openshift.hypershift.control-plane-operator-manages.decompress-decode-c LABEL io.openshift.hypershift.control-plane-operator-creates-aws-sg=true LABEL io.openshift.hypershift.control-plane-operator-applies-management-kas-network-policy-label=true LABEL io.openshift.hypershift.restricted-psa=true +LABEL io.openshift.hypershift.control-plane-pki-operator-signs-csrs=true diff --git a/Dockerfile b/Dockerfile index 38a094923d..005ebd749b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,3 +32,4 @@ LABEL io.openshift.hypershift.control-plane-operator-manages.decompress-decode-c LABEL io.openshift.hypershift.control-plane-operator-creates-aws-sg=true LABEL io.openshift.hypershift.control-plane-operator-applies-management-kas-network-policy-label=true LABEL io.openshift.hypershift.restricted-psa=true +LABEL io.openshift.hypershift.control-plane-pki-operator-signs-csrs=true diff --git a/Dockerfile.control-plane b/Dockerfile.control-plane index b654a7e5a6..d7094a03f1 100644 --- a/Dockerfile.control-plane +++ b/Dockerfile.control-plane @@ -1,4 +1,4 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-8-golang-1.20-openshift-4.15 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.20-openshift-4.15 AS builder WORKDIR /hypershift @@ -6,7 +6,7 @@ COPY . . 
RUN make control-plane-operator control-plane-pki-operator -FROM registry.ci.openshift.org/ocp/4.15:base +FROM registry.ci.openshift.org/ocp/4.15:base-rhel9 COPY --from=builder /hypershift/bin/control-plane-operator /usr/bin/control-plane-operator COPY --from=builder /hypershift/bin/control-plane-pki-operator /usr/bin/control-plane-pki-operator @@ -23,3 +23,4 @@ LABEL io.openshift.hypershift.control-plane-operator-manages.decompress-decode-c LABEL io.openshift.hypershift.control-plane-operator-creates-aws-sg=true LABEL io.openshift.hypershift.control-plane-operator-applies-management-kas-network-policy-label=true LABEL io.openshift.hypershift.restricted-psa=true +LABEL io.openshift.hypershift.control-plane-pki-operator-signs-csrs=true diff --git a/Makefile b/Makefile index 8311a2494b..ee098a91ef 100644 --- a/Makefile +++ b/Makefile @@ -94,9 +94,9 @@ api: hypershift-api cluster-api cluster-api-provider-aws cluster-api-provider-ib .PHONY: hypershift-api hypershift-api: $(CONTROLLER_GEN) - $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./api/hypershift/..." + $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./api/..." rm -rf cmd/install/assets/hypershift-operator/*.yaml - $(CONTROLLER_GEN) $(CRD_OPTIONS) paths="./api/hypershift/..." output:crd:artifacts:config=cmd/install/assets/hypershift-operator + $(CONTROLLER_GEN) $(CRD_OPTIONS) paths="./api/..." output:crd:artifacts:config=cmd/install/assets/hypershift-operator .PHONY: cluster-api cluster-api: $(CONTROLLER_GEN) @@ -184,7 +184,7 @@ fmt: # Run go vet against code .PHONY: vet vet: - $(GO) vet ./... + $(GO) vet -tags integration,e2e ./... .PHONY: promtool promtool: @@ -204,6 +204,7 @@ deps: staticcheck: $(STATICCHECK) $(STATICCHECK) \ ./control-plane-operator/... \ + ./control-plane-pki-operator/... \ ./hypershift-operator/controllers/... \ ./ignition-server/... \ ./cmd/... \ @@ -212,7 +213,8 @@ staticcheck: $(STATICCHECK) ./support/upsert/... 
\ ./konnectivity-socks5-proxy/... \ ./contrib/... \ - ./availability-prober/... + ./availability-prober/... \ + ./test/integration/... \ # Build the docker image with official golang image .PHONY: docker-build @@ -261,3 +263,8 @@ ci-install-hypershift-private: .PHONY: ci-test-e2e ci-test-e2e: hack/ci-test-e2e.sh ${CI_TESTS_RUN} + +.PHONY: regenerate-pki +regenerate-pki: + REGENERATE_PKI=1 $(GO) test ./control-plane-pki-operator/... + REGENERATE_PKI=1 $(GO) test ./test/e2e/... -run TestRegeneratePKI \ No newline at end of file diff --git a/OWNERS b/OWNERS index 0cb2691244..f2f0d3fb60 100644 --- a/OWNERS +++ b/OWNERS @@ -3,6 +3,9 @@ approvers: - csrwng - sjenning - davidvossel +- muraee +- bryan-cox +- jparrill options: {} reviewers: - enxebre diff --git a/api/certificates/register.go b/api/certificates/register.go new file mode 100644 index 0000000000..d833455317 --- /dev/null +++ b/api/certificates/register.go @@ -0,0 +1,3 @@ +package certificates + +const GroupName = "certificates.hypershift.openshift.io" diff --git a/api/certificates/v1alpha1/certificaterevocationrequest_types.go b/api/certificates/v1alpha1/certificaterevocationrequest_types.go new file mode 100644 index 0000000000..2078ac22a3 --- /dev/null +++ b/api/certificates/v1alpha1/certificaterevocationrequest_types.go @@ -0,0 +1,82 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:resource:path=certificaterevocationrequests,shortName=crr;crrs,scope=Namespaced +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion + +// CertificateRevocationRequest defines the desired state of CertificateRevocationRequest. +// A request denotes the user's desire to revoke a signer certificate of the class indicated in spec. 
+type CertificateRevocationRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CertificateRevocationRequestSpec `json:"spec,omitempty"` + Status CertificateRevocationRequestStatus `json:"status,omitempty"` +} + +// CertificateRevocationRequestSpec defines the desired state of CertificateRevocationRequest +type CertificateRevocationRequestSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=customer-break-glass;sre-break-glass + // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="signerClass is immutable" + + // SignerClass identifies the class of signer to revoke. All the active signing CAs for the + // signer class will be revoked. + SignerClass string `json:"signerClass"` +} + +const ( + SignerClassValidType string = "SignerClassValid" + SignerClassUnknownReason string = "SignerClassUnknown" + + RootCertificatesRegeneratedType string = "RootCertificatesRegenerated" + RootCertificatesStaleReason string = "RootCertificatesStale" + + LeafCertificatesRegeneratedType string = "LeafCertificatesRegenerated" + LeafCertificatesStaleReason string = "LeafCertificatesStale" + + NewCertificatesTrustedType = "NewCertificatesTrusted" + PreviousCertificatesRevokedType = "PreviousCertificatesRevoked" +) + +// CertificateRevocationRequestStatus defines the observed state of CertificateRevocationRequest +type CertificateRevocationRequestStatus struct { + // +optional + + // RevocationTimestamp is the cut-off time for signing CAs to be revoked. All certificates that + // are valid before this time will be revoked; all re-generated certificates will not be valid + // at or before this time. + RevocationTimestamp *metav1.Time `json:"revocationTimestamp,omitempty"` + + // +optional + + // PreviousSigner stores a reference to the previous signer certificate. 
We require + // storing this data to ensure that we can validate that the old signer is no longer + // valid before considering revocation complete. + PreviousSigner *corev1.LocalObjectReference `json:"previousSigner,omitempty"` + + // +optional + // +listType=map + // +listMapKey=type + // +patchMergeKey=type + // +patchStrategy=merge + + // Conditions contain details about the various aspects of certificate revocation. + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// +kubebuilder:object:root=true + +// CertificateRevocationRequestList contains a list of CertificateRevocationRequest. +type CertificateRevocationRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CertificateRevocationRequest `json:"items"` +} diff --git a/api/certificates/v1alpha1/certificatesigningrequestapproval_types.go b/api/certificates/v1alpha1/certificatesigningrequestapproval_types.go new file mode 100644 index 0000000000..a898236291 --- /dev/null +++ b/api/certificates/v1alpha1/certificatesigningrequestapproval_types.go @@ -0,0 +1,34 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:resource:path=certificatesigningrequestapprovals,shortName=csra;csras,scope=Namespaced +// +kubebuilder:object:root=true +// +kubebuilder:storageversion + +// CertificateSigningRequestApproval defines the desired state of CertificateSigningRequestApproval +type CertificateSigningRequestApproval struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CertificateSigningRequestApprovalSpec `json:"spec,omitempty"` + Status CertificateSigningRequestApprovalStatus `json:"status,omitempty"` +} + +// CertificateSigningRequestApprovalSpec defines the desired state of CertificateSigningRequestApproval +type CertificateSigningRequestApprovalSpec struct{} + +// 
CertificateSigningRequestApprovalStatus defines the observed state of CertificateSigningRequestApproval +type CertificateSigningRequestApprovalStatus struct{} + +// +kubebuilder:object:root=true + +// CertificateSigningRequestApprovalList contains a list of CertificateSigningRequestApprovals. +type CertificateSigningRequestApprovalList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CertificateSigningRequestApproval `json:"items"` +} diff --git a/api/certificates/v1alpha1/doc.go b/api/certificates/v1alpha1/doc.go new file mode 100644 index 0000000000..94981313a7 --- /dev/null +++ b/api/certificates/v1alpha1/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register +// +groupName=certificates.hypershift.openshift.io +// +k8s:openapi-gen=true +package v1alpha1 diff --git a/api/certificates/v1alpha1/register.go b/api/certificates/v1alpha1/register.go new file mode 100644 index 0000000000..558235401e --- /dev/null +++ b/api/certificates/v1alpha1/register.go @@ -0,0 +1,40 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/openshift/hypershift/api/certificates" +) + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: certificates.GroupName, Version: "v1alpha1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind. +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CertificateSigningRequestApproval{}, + &CertificateSigningRequestApprovalList{}, + + &CertificateRevocationRequest{}, + &CertificateRevocationRequestList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/api/certificates/v1alpha1/zz_generated.deepcopy.go b/api/certificates/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..8c9f6481dc --- /dev/null +++ b/api/certificates/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,222 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRevocationRequest) DeepCopyInto(out *CertificateRevocationRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRevocationRequest. 
+func (in *CertificateRevocationRequest) DeepCopy() *CertificateRevocationRequest { + if in == nil { + return nil + } + out := new(CertificateRevocationRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateRevocationRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRevocationRequestList) DeepCopyInto(out *CertificateRevocationRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateRevocationRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRevocationRequestList. +func (in *CertificateRevocationRequestList) DeepCopy() *CertificateRevocationRequestList { + if in == nil { + return nil + } + out := new(CertificateRevocationRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateRevocationRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRevocationRequestSpec) DeepCopyInto(out *CertificateRevocationRequestSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRevocationRequestSpec. 
+func (in *CertificateRevocationRequestSpec) DeepCopy() *CertificateRevocationRequestSpec { + if in == nil { + return nil + } + out := new(CertificateRevocationRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateRevocationRequestStatus) DeepCopyInto(out *CertificateRevocationRequestStatus) { + *out = *in + if in.RevocationTimestamp != nil { + in, out := &in.RevocationTimestamp, &out.RevocationTimestamp + *out = (*in).DeepCopy() + } + if in.PreviousSigner != nil { + in, out := &in.PreviousSigner, &out.PreviousSigner + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateRevocationRequestStatus. +func (in *CertificateRevocationRequestStatus) DeepCopy() *CertificateRevocationRequestStatus { + if in == nil { + return nil + } + out := new(CertificateRevocationRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestApproval) DeepCopyInto(out *CertificateSigningRequestApproval) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApproval. 
+func (in *CertificateSigningRequestApproval) DeepCopy() *CertificateSigningRequestApproval { + if in == nil { + return nil + } + out := new(CertificateSigningRequestApproval) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateSigningRequestApproval) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestApprovalList) DeepCopyInto(out *CertificateSigningRequestApprovalList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequestApproval, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalList. +func (in *CertificateSigningRequestApprovalList) DeepCopy() *CertificateSigningRequestApprovalList { + if in == nil { + return nil + } + out := new(CertificateSigningRequestApprovalList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateSigningRequestApprovalList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestApprovalSpec) DeepCopyInto(out *CertificateSigningRequestApprovalSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalSpec. 
+func (in *CertificateSigningRequestApprovalSpec) DeepCopy() *CertificateSigningRequestApprovalSpec { + if in == nil { + return nil + } + out := new(CertificateSigningRequestApprovalSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestApprovalStatus) DeepCopyInto(out *CertificateSigningRequestApprovalStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalStatus. +func (in *CertificateSigningRequestApprovalStatus) DeepCopy() *CertificateSigningRequestApprovalStatus { + if in == nil { + return nil + } + out := new(CertificateSigningRequestApprovalStatus) + in.DeepCopyInto(out) + return out +} diff --git a/api/fixtures/example.go b/api/fixtures/example.go index 3d9bc45e6e..3d6b5d0376 100644 --- a/api/fixtures/example.go +++ b/api/fixtures/example.go @@ -3,10 +3,11 @@ package fixtures import ( "crypto/rand" "fmt" - "github.com/openshift/hypershift/cmd/util" "strings" "time" + "github.com/openshift/hypershift/cmd/util" + rbacv1 "k8s.io/api/rbac/v1" corev1 "k8s.io/api/core/v1" @@ -81,6 +82,7 @@ type ExampleOptions struct { Arch string PausedUntil string OLMCatalogPlacement hyperv1.OLMCatalogPlacement + OperatorHub *configv1.OperatorHubSpec AWS *ExampleAWSOptions None *ExampleNoneOptions Agent *ExampleAgentOptions @@ -516,6 +518,14 @@ func (o ExampleOptions) Resources() *ExampleResources { cluster.Spec.PausedUntil = &o.PausedUntil } + if cluster.Spec.Configuration == nil { + cluster.Spec.Configuration = &hyperv1.ClusterConfiguration{} + } + + if o.OperatorHub != nil { + cluster.Spec.Configuration.OperatorHub = o.OperatorHub + } + if len(o.OLMCatalogPlacement) > 0 { cluster.Spec.OLMCatalogPlacement = hyperv1.OLMCatalogPlacement(o.OLMCatalogPlacement) } diff --git a/api/fixtures/example_kubevirt.go b/api/fixtures/example_kubevirt.go 
index 4224c34561..ce4804abec 100644 --- a/api/fixtures/example_kubevirt.go +++ b/api/fixtures/example_kubevirt.go @@ -28,6 +28,7 @@ type ExampleKubevirtOptions struct { QoSClass *hyperv1.QoSClass AdditionalNetworks []hyperv1.KubevirtNetwork AttachDefaultNetwork *bool + VmNodeSelector map[string]string } func ExampleKubeVirtTemplate(o *ExampleKubevirtOptions) *hyperv1.KubevirtNodePoolPlatform { @@ -99,5 +100,9 @@ func ExampleKubeVirtTemplate(o *ExampleKubevirtOptions) *hyperv1.KubevirtNodePoo exampleTemplate.NetworkInterfaceMultiQueue = o.NetworkInterfaceMultiQueue } + if o.VmNodeSelector != nil && len(o.VmNodeSelector) > 0 { + exampleTemplate.NodeSelector = o.VmNodeSelector + } + return exampleTemplate } diff --git a/api/hypershift/v1alpha1/hosted_controlplane.go b/api/hypershift/v1alpha1/hosted_controlplane.go index 265929cfd2..ba6ad435c5 100644 --- a/api/hypershift/v1alpha1/hosted_controlplane.go +++ b/api/hypershift/v1alpha1/hosted_controlplane.go @@ -119,6 +119,8 @@ type HostedControlPlaneSpec struct { // critical control plane components. The default value is SingleReplica. // // +optional + // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ControllerAvailabilityPolicy is immutable" // +kubebuilder:default:="SingleReplica" ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"` diff --git a/api/hypershift/v1alpha1/hostedcluster_types.go b/api/hypershift/v1alpha1/hostedcluster_types.go index 2c8fc15562..506a4b6585 100644 --- a/api/hypershift/v1alpha1/hostedcluster_types.go +++ b/api/hypershift/v1alpha1/hostedcluster_types.go @@ -2115,6 +2115,12 @@ type ClusterConfiguration struct { // +optional OAuth *configv1.OAuthSpec `json:"oauth,omitempty"` + // OperatorHub specifies the configuration for the Operator Lifecycle Manager in the HostedCluster. This is only configured at deployment time but the controller are not reconcilling over it. 
+ // The OperatorHub configuration will be constantly reconciled if catalog placement is management, but only on cluster creation otherwise. + // + // +optional + OperatorHub *configv1.OperatorHubSpec `json:"operatorhub,omitempty"` + // Scheduler holds cluster-wide config information to run the Kubernetes Scheduler // and influence its placement decisions. The canonical name for this config is `cluster`. // +optional diff --git a/api/hypershift/v1alpha1/nodepool_types.go b/api/hypershift/v1alpha1/nodepool_types.go index 6640b4ec74..dae7f03190 100644 --- a/api/hypershift/v1alpha1/nodepool_types.go +++ b/api/hypershift/v1alpha1/nodepool_types.go @@ -586,8 +586,8 @@ type KubevirtCompute struct { // https://kubevirt.io/user-guide/operations/node_overcommit/#requesting-the-right-qos-class-for-virtualmachineinstances // // +optional - //+kubebuilder:validation:Enum=Burstable;Guaranteed - //+kubebuilder:default=Burstable + // +kubebuilder:validation:Enum=Burstable;Guaranteed + // +kubebuilder:default=Burstable QosClass *QoSClass `json:"qosClass,omitempty"` } @@ -726,6 +726,13 @@ type KubevirtNodePoolPlatform struct { // +optional // +kubebuilder:default=true AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` + + // NodeSelector is a selector which must be true for the kubevirt VirtualMachine to fit on a node. + // Selector which must match a node's labels for the VM to be scheduled on that node. 
More info: + // https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` } // KubevirtNetwork specifies the configuration for a virtual machine diff --git a/api/hypershift/v1alpha1/zz_generated.deepcopy.go b/api/hypershift/v1alpha1/zz_generated.deepcopy.go index dfa24b424c..e51e969405 100644 --- a/api/hypershift/v1alpha1/zz_generated.deepcopy.go +++ b/api/hypershift/v1alpha1/zz_generated.deepcopy.go @@ -628,6 +628,11 @@ func (in *ClusterConfiguration) DeepCopyInto(out *ClusterConfiguration) { *out = new(configv1.OAuthSpec) (*in).DeepCopyInto(*out) } + if in.OperatorHub != nil { + in, out := &in.OperatorHub, &out.OperatorHub + *out = new(configv1.OperatorHubSpec) + (*in).DeepCopyInto(*out) + } if in.Scheduler != nil { in, out := &in.Scheduler, &out.Scheduler *out = new(configv1.SchedulerSpec) @@ -1555,6 +1560,13 @@ func (in *KubevirtNodePoolPlatform) DeepCopyInto(out *KubevirtNodePoolPlatform) *out = new(bool) **out = **in } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNodePoolPlatform. 
diff --git a/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go b/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go new file mode 100644 index 0000000000..00d7ce48d7 --- /dev/null +++ b/api/hypershift/v1beta1/certificatesigningrequestapproval_types.go @@ -0,0 +1,34 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +kubebuilder:resource:path=certificatesigningrequestapprovals,shortName=csra;csras,scope=Namespaced +// +kubebuilder:object:root=true +// +kubebuilder:storageversion + +// CertificateSigningRequestApproval defines the desired state of CertificateSigningRequestApproval +type CertificateSigningRequestApproval struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CertificateSigningRequestApprovalSpec `json:"spec,omitempty"` + Status CertificateSigningRequestApprovalStatus `json:"status,omitempty"` +} + +// CertificateSigningRequestApprovalSpec defines the desired state of CertificateSigningRequestApproval +type CertificateSigningRequestApprovalSpec struct{} + +// CertificateSigningRequestApprovalStatus defines the observed state of CertificateSigningRequestApproval +type CertificateSigningRequestApprovalStatus struct{} + +// +kubebuilder:object:root=true + +// CertificateSigningRequestApprovalList contains a list of CertificateSigningRequestApprovals. +type CertificateSigningRequestApprovalList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CertificateSigningRequestApproval `json:"items"` +} diff --git a/api/hypershift/v1beta1/hosted_controlplane.go b/api/hypershift/v1beta1/hosted_controlplane.go index 114df8e2ce..6bb31ec12a 100644 --- a/api/hypershift/v1beta1/hosted_controlplane.go +++ b/api/hypershift/v1beta1/hosted_controlplane.go @@ -79,6 +79,8 @@ type HostedControlPlaneSpec struct { // critical control plane components. The default value is SingleReplica. 
// // +optional + // +immutable + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ControllerAvailabilityPolicy is immutable" // +kubebuilder:default:="SingleReplica" ControllerAvailabilityPolicy AvailabilityPolicy `json:"controllerAvailabilityPolicy,omitempty"` diff --git a/api/hypershift/v1beta1/hostedcluster_conditions.go b/api/hypershift/v1beta1/hostedcluster_conditions.go index fc5dbb00a0..a98f84cce6 100644 --- a/api/hypershift/v1beta1/hostedcluster_conditions.go +++ b/api/hypershift/v1beta1/hostedcluster_conditions.go @@ -99,6 +99,11 @@ const ( // A failure here may require external user intervention to resolve. E.g. oidc was deleted out of band. ValidOIDCConfiguration ConditionType = "ValidOIDCConfiguration" + // ValidIDPConfiguration indicates if the Identity Provider configuration is valid. + // A failure here may require external user intervention to resolve + // e.g. the user-provided IDP configuration provided is invalid or the IDP is not reachable. + ValidIDPConfiguration ConditionType = "ValidIDPConfiguration" + // ValidReleaseImage indicates if the release image set in the spec is valid // for the HostedCluster. For example, this can be set false if the // HostedCluster itself attempts an unsupported version before 4.9 or an @@ -146,6 +151,12 @@ const ( // blocked from creating machines. AWSDefaultSecurityGroupCreated ConditionType = "AWSDefaultSecurityGroupCreated" + // AWSDefaultSecurityGroupDeleted indicates whether the default security group + // for AWS workers has been deleted. + // A failure here indicates that the Security Group has some dependencies that + // there are still pending cloud resources to be deleted that are using that SG. + AWSDefaultSecurityGroupDeleted ConditionType = "AWSDefaultSecurityGroupDeleted" + // PlatformCredentialsFound indicates that credentials required for the // desired platform are valid. // A failure here is unlikely to resolve without the changing user input. 
@@ -168,6 +179,7 @@ const ( WaitingForAvailableReason = "WaitingForAvailable" SecretNotFoundReason = "SecretNotFound" WaitingForGracePeriodReason = "WaitingForGracePeriod" + BlockedReason = "Blocked" InfraStatusFailureReason = "InfraStatusFailure" WaitingOnInfrastructureReadyReason = "WaitingOnInfrastructureReady" diff --git a/api/hypershift/v1beta1/hostedcluster_types.go b/api/hypershift/v1beta1/hostedcluster_types.go index c6353c2a0c..240c2dcf23 100644 --- a/api/hypershift/v1beta1/hostedcluster_types.go +++ b/api/hypershift/v1beta1/hostedcluster_types.go @@ -41,9 +41,13 @@ const ( // KonnectivityAgentImageAnnotation is a temporary annotation that allows the specification of the konnectivity agent image. // This will be removed when Konnectivity is added to the Openshift release payload KonnectivityAgentImageAnnotation = "hypershift.openshift.io/konnectivity-agent-image" - // ControlPlaneOperatorImageAnnotation is a annotation that allows the specification of the control plane operator image. + // ControlPlaneOperatorImageAnnotation is an annotation that allows the specification of the control plane operator image. // This is used for development and e2e workflows ControlPlaneOperatorImageAnnotation = "hypershift.openshift.io/control-plane-operator-image" + // ControlPlaneOperatorImageLabelsAnnotation is an annotation that allows the specification of the control plane operator image labels. + // Labels are provided in a comma-delimited format: key=value,key2=value2 + // This is used for development and e2e workflows + ControlPlaneOperatorImageLabelsAnnotation = "hypershift.openshift.io/control-plane-operator-image-labels" // RestartDateAnnotation is a annotation that can be used to trigger a rolling restart of all components managed by hypershift. // it is important in some situations like CA rotation where components need to be fully restarted to pick up new CAs. 
It's also // important in some recovery situations where a fresh start of the component helps fix symptoms a user might be experiencing. @@ -66,11 +70,13 @@ const ( // PortierisImageAnnotation is an annotation that allows the specification of the portieries component // (performs container image verification). PortierisImageAnnotation = "hypershift.openshift.io/portieris-image" - // Configure ingress controller with endpoint publishing strategy as Private. + // PrivateIngressControllerAnnotation is an annotation that configures ingress controller with endpoint publishing strategy as Private. // This overrides any opinionated strategy set by platform in ReconcileDefaultIngressController. // It's used by IBM cloud to support ingress endpoint publishing strategy scope // NOTE: We'll expose this in the API if the use case gets generalised. PrivateIngressControllerAnnotation = "hypershift.openshift.io/private-ingress-controller" + // IngressControllerLoadBalancerScope is an annotation that allows the specification of the LoadBalancer scope for ingress controller. + IngressControllerLoadBalancerScope = "hypershift.openshift.io/ingress-controller-load-balancer-scope" // CertifiedOperatorsCatalogImageAnnotation, CommunityOperatorsCatalogImageAnnotation, RedHatMarketplaceCatalogImageAnnotation and RedHatOperatorsCatalogImageAnnotation // are annotations that can be used to override the address of the images used for the OLM catalogs if in the `management` OLMCatalogPlacement mode. @@ -218,6 +224,13 @@ const ( // components should be scheduled on dedicated nodes in the management cluster. DedicatedRequestServingComponentsTopology = "dedicated-request-serving-components" + // RequestServingNodeAdditionalSelectorAnnotation is used to specify an additional node selector for + // request serving nodes. The value is a comma-separated list of key=value pairs. 
+ RequestServingNodeAdditionalSelectorAnnotation = "hypershift.openshift.io/request-serving-node-additional-selector" + + // DisableMachineManagement Disable deployments related to machine management that includes cluster-api, cluster-autoscaler, machine-approver. + DisableMachineManagement = "hypershift.openshift.io/disable-machine-management" + // AllowGuestWebhooksServiceLabel marks a service deployed in the control plane as a valid target // for validating/mutating webhooks running in the guest cluster. AllowGuestWebhooksServiceLabel = "hypershift.openshift.io/allow-guest-webhooks" @@ -227,11 +240,39 @@ const ( // See https://github.com/openshift/enhancements/blob/master/enhancements/authentication/pod-security-admission.md PodSecurityAdmissionLabelOverrideAnnotation = "hypershift.openshift.io/pod-security-admission-label-override" - //DisableMonitoringServices introduces an option to disable monitor services IBM Cloud do not use. + // DisableMonitoringServices introduces an option to disable monitor services IBM Cloud do not use. DisableMonitoringServices = "hypershift.openshift.io/disable-monitoring-services" // JSONPatchAnnotation allow modifying the kubevirt VM template using jsonpatch JSONPatchAnnotation = "hypershift.openshift.io/kubevirt-vm-jsonpatch" + + // KubeAPIServerGOGCAnnotation allows modifying the kube-apiserver GOGC environment variable to impact how often + // the GO garbage collector runs. This can be used to reduce the memory footprint of the kube-apiserver. + KubeAPIServerGOGCAnnotation = "hypershift.openshift.io/kube-apiserver-gogc" + + // KubeAPIServerGOMemoryLimitAnnotation allows modifying the kube-apiserver GOMEMLIMIT environment variable to increase + // the frequency of memory collection when memory used rises above a particular threshhold. This can be used to reduce + // the memory footprint of the kube-apiserver during upgrades. 
+ KubeAPIServerGOMemoryLimitAnnotation = "hypershift.openshift.io/kube-apiserver-gomemlimit" + + // KubeAPIServerMaximumRequestsInFlight allows overriding the default value for the kube-apiserver max-requests-inflight + // flag. This allows controlling how many concurrent requests can be handled by the Kube API server at any given time. + KubeAPIServerMaximumRequestsInFlight = "hypershift.openshift.io/kube-apiserver-max-requests-inflight" + + // KubeAPIServerMaximumMutatingRequestsInFlight allows overring the default value for the kube-apiserver max-mutating-requests-inflight + // flag. This allows controlling how many mutating concurrent requests can be handled by the Kube API server at any given time. + KubeAPIServerMaximumMutatingRequestsInFlight = "hypershift.openshift.io/kube-apiserver-max-mutating-requests-inflight" + + // DisableClusterAutoscalerAnnotation allows disabling the cluster autoscaler for a hosted cluster. + // This annotation is only set by the hypershift-operator on HosterControlPlanes. + // It is not set by the end-user. + DisableClusterAutoscalerAnnotation = "hypershift.openshift.io/disable-cluster-autoscaler" + + // KubeAPIServerVerbosityLevelAnnotation allows specifing the log verbosity of kube-apiserver. + KubeAPIServerVerbosityLevelAnnotation = "hypershift.openshift.io/kube-apiserver-verbosity-level" + + // ManagementPlatformAnnotation specifies the infrastructure platform of the underlying management cluster + ManagementPlatformAnnotation = "hypershift.openshift.io/management-platform" ) // HostedClusterSpec is the desired behavior of a HostedCluster. 
@@ -2170,6 +2211,12 @@ type ClusterConfiguration struct { // +kubebuilder:validation:XValidation:rule="!has(self.tokenConfig) || !has(self.tokenConfig.accessTokenInactivityTimeout) || duration(self.tokenConfig.accessTokenInactivityTimeout).getSeconds() >= 300", message="spec.configuration.oauth.tokenConfig.accessTokenInactivityTimeout minimum acceptable token timeout value is 300 seconds" OAuth *configv1.OAuthSpec `json:"oauth,omitempty"` + // OperatorHub specifies the configuration for the Operator Lifecycle Manager in the HostedCluster. This is only configured at deployment time but the controller are not reconcilling over it. + // The OperatorHub configuration will be constantly reconciled if catalog placement is management, but only on cluster creation otherwise. + // + // +optional + OperatorHub *configv1.OperatorHubSpec `json:"operatorhub,omitempty"` + // Scheduler holds cluster-wide config information to run the Kubernetes Scheduler // and influence its placement decisions. The canonical name for this config is `cluster`. // +optional diff --git a/api/hypershift/v1beta1/nodepool_types.go b/api/hypershift/v1beta1/nodepool_types.go index 0a12c93a3d..3735f64b64 100644 --- a/api/hypershift/v1beta1/nodepool_types.go +++ b/api/hypershift/v1beta1/nodepool_types.go @@ -720,6 +720,13 @@ type KubevirtNodePoolPlatform struct { // +optional // +kubebuilder:default=true AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` + + // NodeSelector is a selector which must be true for the kubevirt VirtualMachine to fit on a node. + // Selector which must match a node's labels for the VM to be scheduled on that node. 
More info: + // https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` } // KubevirtNetwork specifies the configuration for a virtual machine diff --git a/api/hypershift/v1beta1/zz_generated.deepcopy.go b/api/hypershift/v1beta1/zz_generated.deepcopy.go index 7f8b670261..4972fa61f1 100644 --- a/api/hypershift/v1beta1/zz_generated.deepcopy.go +++ b/api/hypershift/v1beta1/zz_generated.deepcopy.go @@ -534,6 +534,95 @@ func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestApproval) DeepCopyInto(out *CertificateSigningRequestApproval) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApproval. +func (in *CertificateSigningRequestApproval) DeepCopy() *CertificateSigningRequestApproval { + if in == nil { + return nil + } + out := new(CertificateSigningRequestApproval) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateSigningRequestApproval) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateSigningRequestApprovalList) DeepCopyInto(out *CertificateSigningRequestApprovalList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequestApproval, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalList. +func (in *CertificateSigningRequestApprovalList) DeepCopy() *CertificateSigningRequestApprovalList { + if in == nil { + return nil + } + out := new(CertificateSigningRequestApprovalList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateSigningRequestApprovalList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestApprovalSpec) DeepCopyInto(out *CertificateSigningRequestApprovalSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalSpec. +func (in *CertificateSigningRequestApprovalSpec) DeepCopy() *CertificateSigningRequestApprovalSpec { + if in == nil { + return nil + } + out := new(CertificateSigningRequestApprovalSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestApprovalStatus) DeepCopyInto(out *CertificateSigningRequestApprovalStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestApprovalStatus. 
+func (in *CertificateSigningRequestApprovalStatus) DeepCopy() *CertificateSigningRequestApprovalStatus { + if in == nil { + return nil + } + out := new(CertificateSigningRequestApprovalStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterAutoscaling) DeepCopyInto(out *ClusterAutoscaling) { *out = *in @@ -602,6 +691,11 @@ func (in *ClusterConfiguration) DeepCopyInto(out *ClusterConfiguration) { *out = new(configv1.OAuthSpec) (*in).DeepCopyInto(*out) } + if in.OperatorHub != nil { + in, out := &in.OperatorHub, &out.OperatorHub + *out = new(configv1.OperatorHubSpec) + (*in).DeepCopyInto(*out) + } if in.Scheduler != nil { in, out := &in.Scheduler, &out.Scheduler *out = new(configv1.SchedulerSpec) @@ -1514,6 +1608,13 @@ func (in *KubevirtNodePoolPlatform) DeepCopyInto(out *KubevirtNodePoolPlatform) *out = new(bool) **out = **in } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNodePoolPlatform. 
diff --git a/api/util/configrefs/refs_test.go b/api/util/configrefs/refs_test.go index d94351fd76..1d66307f97 100644 --- a/api/util/configrefs/refs_test.go +++ b/api/util/configrefs/refs_test.go @@ -86,6 +86,24 @@ func TestConfigMapRefs(t *testing.T) { }, refs: []string{"oauthmetadataref"}, }, + { + name: "oidc provider", + config: &hyperv1.ClusterConfiguration{ + Authentication: &configv1.AuthenticationSpec{ + Type: configv1.AuthenticationTypeOIDC, + OIDCProviders: []configv1.OIDCProvider{ + { + Issuer: configv1.TokenIssuer{ + CertificateAuthority: configv1.ConfigMapNameReference{ + Name: "issuercaref", + }, + }, + }, + }, + }, + }, + refs: []string{"issuercaref"}, + }, { name: "image ca", config: &hyperv1.ClusterConfiguration{ @@ -366,6 +384,26 @@ func TestSecretRefs(t *testing.T) { }, refs: []string{"serving-cert1", "serving-cert2"}, }, + { + name: "oidc client secret", + config: &hyperv1.ClusterConfiguration{ + Authentication: &configv1.AuthenticationSpec{ + Type: configv1.AuthenticationTypeOIDC, + OIDCProviders: []configv1.OIDCProvider{ + { + OIDCClients: []configv1.OIDCClientConfig{ + { + ClientSecret: configv1.SecretNameReference{ + Name: "clientsecretref", + }, + }, + }, + }, + }, + }, + }, + refs: []string{"clientsecretref"}, + }, { name: "idp refs", config: &hyperv1.ClusterConfiguration{ diff --git a/availability-prober/availability_prober.go b/availability-prober/availability_prober.go index 62481c2eff..98898e4e89 100644 --- a/availability-prober/availability_prober.go +++ b/availability-prober/availability_prober.go @@ -121,7 +121,7 @@ func check(log logr.Logger, target *url.URL, requestTimeout time.Duration, sleep log.WithValues("statuscode", response.StatusCode).Info("Request didn't return a 2XX status code, retrying...") continue } - log.Info("Success", "statuscode", response.StatusCode) + log.Info("URI probing succeeded", "uri", target.String(), "statuscode", response.StatusCode) if len(requiredAPIs) > 0 { _, apis, err := 
discoveryClient.ServerGroupsAndResources() @@ -149,6 +149,10 @@ func check(log logr.Logger, target *url.URL, requestTimeout time.Duration, sleep log.Info("cluster infrastructure resource not yet available", "err", err) continue } + if clusterInfrastructure.Status.InfrastructureName == "" { + log.Info("cluster infrastructure name is not yet set") + continue + } } if waitForLabeledPodsGone != "" { @@ -183,6 +187,7 @@ func check(log logr.Logger, target *url.URL, requestTimeout time.Duration, sleep } } + log.Info("all checks successful, exiting...") return } } diff --git a/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go new file mode 100644 index 0000000000..5f1639b073 --- /dev/null +++ b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequest.go @@ -0,0 +1,218 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CertificateRevocationRequestApplyConfiguration represents an declarative configuration of the CertificateRevocationRequest type for use +// with apply. 
+type CertificateRevocationRequestApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *CertificateRevocationRequestSpecApplyConfiguration `json:"spec,omitempty"` + Status *CertificateRevocationRequestStatusApplyConfiguration `json:"status,omitempty"` +} + +// CertificateRevocationRequest constructs an declarative configuration of the CertificateRevocationRequest type for use with +// apply. +func CertificateRevocationRequest(name, namespace string) *CertificateRevocationRequestApplyConfiguration { + b := &CertificateRevocationRequestApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("CertificateRevocationRequest") + b.WithAPIVersion("certificates.hypershift.openshift.io/v1alpha1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithKind(value string) *CertificateRevocationRequestApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithAPIVersion(value string) *CertificateRevocationRequestApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *CertificateRevocationRequestApplyConfiguration) WithName(value string) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithGenerateName(value string) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithNamespace(value string) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithUID(value types.UID) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithResourceVersion(value string) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithGeneration(value int64) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
+func (b *CertificateRevocationRequestApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *CertificateRevocationRequestApplyConfiguration) WithLabels(entries map[string]string) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *CertificateRevocationRequestApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *CertificateRevocationRequestApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. 
+func (b *CertificateRevocationRequestApplyConfiguration) WithFinalizers(values ...string) *CertificateRevocationRequestApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *CertificateRevocationRequestApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithSpec(value *CertificateRevocationRequestSpecApplyConfiguration) *CertificateRevocationRequestApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *CertificateRevocationRequestApplyConfiguration) WithStatus(value *CertificateRevocationRequestStatusApplyConfiguration) *CertificateRevocationRequestApplyConfiguration { + b.Status = value + return b +} diff --git a/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequestspec.go b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequestspec.go new file mode 100644 index 0000000000..17c988c267 --- /dev/null +++ b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequestspec.go @@ -0,0 +1,38 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// CertificateRevocationRequestSpecApplyConfiguration represents an declarative configuration of the CertificateRevocationRequestSpec type for use +// with apply. +type CertificateRevocationRequestSpecApplyConfiguration struct { + SignerClass *string `json:"signerClass,omitempty"` +} + +// CertificateRevocationRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateRevocationRequestSpec type for use with +// apply. +func CertificateRevocationRequestSpec() *CertificateRevocationRequestSpecApplyConfiguration { + return &CertificateRevocationRequestSpecApplyConfiguration{} +} + +// WithSignerClass sets the SignerClass field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SignerClass field is set to the value of the last call. 
+func (b *CertificateRevocationRequestSpecApplyConfiguration) WithSignerClass(value string) *CertificateRevocationRequestSpecApplyConfiguration { + b.SignerClass = &value + return b +} diff --git a/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequeststatus.go b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequeststatus.go new file mode 100644 index 0000000000..e84c3aa3b5 --- /dev/null +++ b/client/applyconfiguration/certificates/v1alpha1/certificaterevocationrequeststatus.go @@ -0,0 +1,67 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CertificateRevocationRequestStatusApplyConfiguration represents an declarative configuration of the CertificateRevocationRequestStatus type for use +// with apply. +type CertificateRevocationRequestStatusApplyConfiguration struct { + RevocationTimestamp *v1.Time `json:"revocationTimestamp,omitempty"` + PreviousSigner *corev1.LocalObjectReference `json:"previousSigner,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// CertificateRevocationRequestStatusApplyConfiguration constructs an declarative configuration of the CertificateRevocationRequestStatus type for use with +// apply. 
+func CertificateRevocationRequestStatus() *CertificateRevocationRequestStatusApplyConfiguration { + return &CertificateRevocationRequestStatusApplyConfiguration{} +} + +// WithRevocationTimestamp sets the RevocationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RevocationTimestamp field is set to the value of the last call. +func (b *CertificateRevocationRequestStatusApplyConfiguration) WithRevocationTimestamp(value v1.Time) *CertificateRevocationRequestStatusApplyConfiguration { + b.RevocationTimestamp = &value + return b +} + +// WithPreviousSigner sets the PreviousSigner field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PreviousSigner field is set to the value of the last call. +func (b *CertificateRevocationRequestStatusApplyConfiguration) WithPreviousSigner(value corev1.LocalObjectReference) *CertificateRevocationRequestStatusApplyConfiguration { + b.PreviousSigner = &value + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
+func (b *CertificateRevocationRequestStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *CertificateRevocationRequestStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/client/applyconfiguration/certificates/v1alpha1/certificatesigningrequestapproval.go b/client/applyconfiguration/certificates/v1alpha1/certificatesigningrequestapproval.go new file mode 100644 index 0000000000..54131691c6 --- /dev/null +++ b/client/applyconfiguration/certificates/v1alpha1/certificatesigningrequestapproval.go @@ -0,0 +1,219 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CertificateSigningRequestApprovalApplyConfiguration represents an declarative configuration of the CertificateSigningRequestApproval type for use +// with apply. 
+type CertificateSigningRequestApprovalApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *v1alpha1.CertificateSigningRequestApprovalSpec `json:"spec,omitempty"` + Status *v1alpha1.CertificateSigningRequestApprovalStatus `json:"status,omitempty"` +} + +// CertificateSigningRequestApproval constructs an declarative configuration of the CertificateSigningRequestApproval type for use with +// apply. +func CertificateSigningRequestApproval(name, namespace string) *CertificateSigningRequestApprovalApplyConfiguration { + b := &CertificateSigningRequestApprovalApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("CertificateSigningRequestApproval") + b.WithAPIVersion("certificates.hypershift.openshift.io/v1alpha1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithKind(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithAPIVersion(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Name field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithName(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithGenerateName(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithNamespace(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithUID(value types.UID) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithResourceVersion(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithGeneration(value int64) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithLabels(entries map[string]string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithFinalizers(values ...string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *CertificateSigningRequestApprovalApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithSpec(value v1alpha1.CertificateSigningRequestApprovalSpec) *CertificateSigningRequestApprovalApplyConfiguration { + b.Spec = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithStatus(value v1alpha1.CertificateSigningRequestApprovalStatus) *CertificateSigningRequestApprovalApplyConfiguration { + b.Status = &value + return b +} diff --git a/client/applyconfiguration/hypershift/v1alpha1/clusterconfiguration.go b/client/applyconfiguration/hypershift/v1alpha1/clusterconfiguration.go index 4ecf4d940f..4e08645278 100644 --- a/client/applyconfiguration/hypershift/v1alpha1/clusterconfiguration.go +++ b/client/applyconfiguration/hypershift/v1alpha1/clusterconfiguration.go @@ -36,6 +36,7 @@ type ClusterConfigurationApplyConfiguration struct { Ingress *configv1.IngressSpec `json:"ingress,omitempty"` Network *configv1.NetworkSpec `json:"network,omitempty"` OAuth *configv1.OAuthSpec `json:"oauth,omitempty"` + OperatorHub *configv1.OperatorHubSpec `json:"operatorhub,omitempty"` Scheduler *configv1.SchedulerSpec `json:"scheduler,omitempty"` Proxy *configv1.ProxySpec `json:"proxy,omitempty"` } @@ -132,6 +133,14 @@ func (b *ClusterConfigurationApplyConfiguration) WithOAuth(value configv1.OAuthS return b } +// WithOperatorHub sets the OperatorHub field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorHub field is set to the value of the last call. +func (b *ClusterConfigurationApplyConfiguration) WithOperatorHub(value configv1.OperatorHubSpec) *ClusterConfigurationApplyConfiguration { + b.OperatorHub = &value + return b +} + // WithScheduler sets the Scheduler field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scheduler field is set to the value of the last call. 
diff --git a/client/applyconfiguration/hypershift/v1alpha1/hostedclusterstatus.go b/client/applyconfiguration/hypershift/v1alpha1/hostedclusterstatus.go index 5fe311f4fd..d5d2565c58 100644 --- a/client/applyconfiguration/hypershift/v1alpha1/hostedclusterstatus.go +++ b/client/applyconfiguration/hypershift/v1alpha1/hostedclusterstatus.go @@ -19,7 +19,7 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // HostedClusterStatusApplyConfiguration represents an declarative configuration of the HostedClusterStatus type for use @@ -31,7 +31,7 @@ type HostedClusterStatusApplyConfiguration struct { IgnitionEndpoint *string `json:"ignitionEndpoint,omitempty"` ControlPlaneEndpoint *APIEndpointApplyConfiguration `json:"controlPlaneEndpoint,omitempty"` OAuthCallbackURLTemplate *string `json:"oauthCallbackURLTemplate,omitempty"` - Conditions []metav1.Condition `json:"conditions,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` Platform *PlatformStatusApplyConfiguration `json:"platform,omitempty"` } @@ -92,9 +92,12 @@ func (b *HostedClusterStatusApplyConfiguration) WithOAuthCallbackURLTemplate(val // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. 
-func (b *HostedClusterStatusApplyConfiguration) WithConditions(values ...metav1.Condition) *HostedClusterStatusApplyConfiguration { +func (b *HostedClusterStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *HostedClusterStatusApplyConfiguration { for i := range values { - b.Conditions = append(b.Conditions, values[i]) + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) } return b } diff --git a/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolplatform.go b/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolplatform.go index 14284dd1d6..f5ae52ccfd 100644 --- a/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolplatform.go +++ b/client/applyconfiguration/hypershift/v1alpha1/kubevirtnodepoolplatform.go @@ -29,6 +29,7 @@ type KubevirtNodePoolPlatformApplyConfiguration struct { NetworkInterfaceMultiQueue *hypershiftv1alpha1.MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"` AdditionalNetworks []KubevirtNetworkApplyConfiguration `json:"additionalNetworks,omitempty"` AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` } // KubevirtNodePoolPlatformApplyConfiguration constructs an declarative configuration of the KubevirtNodePoolPlatform type for use with @@ -81,3 +82,17 @@ func (b *KubevirtNodePoolPlatformApplyConfiguration) WithAttachDefaultNetwork(va b.AttachDefaultNetwork = &value return b } + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. 
+func (b *KubevirtNodePoolPlatformApplyConfiguration) WithNodeSelector(entries map[string]string) *KubevirtNodePoolPlatformApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} diff --git a/client/applyconfiguration/hypershift/v1beta1/certificatesigningrequestapproval.go b/client/applyconfiguration/hypershift/v1beta1/certificatesigningrequestapproval.go new file mode 100644 index 0000000000..5798fbf9ad --- /dev/null +++ b/client/applyconfiguration/hypershift/v1beta1/certificatesigningrequestapproval.go @@ -0,0 +1,219 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// CertificateSigningRequestApprovalApplyConfiguration represents an declarative configuration of the CertificateSigningRequestApproval type for use +// with apply. 
+type CertificateSigningRequestApprovalApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *v1beta1.CertificateSigningRequestApprovalSpec `json:"spec,omitempty"` + Status *v1beta1.CertificateSigningRequestApprovalStatus `json:"status,omitempty"` +} + +// CertificateSigningRequestApproval constructs an declarative configuration of the CertificateSigningRequestApproval type for use with +// apply. +func CertificateSigningRequestApproval(name, namespace string) *CertificateSigningRequestApprovalApplyConfiguration { + b := &CertificateSigningRequestApprovalApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("CertificateSigningRequestApproval") + b.WithAPIVersion("hypershift.openshift.io/v1beta1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithKind(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithAPIVersion(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Name field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithName(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithGenerateName(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithNamespace(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithUID(value types.UID) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithResourceVersion(value string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithGeneration(value int64) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithCreationTimestamp(value metav1.Time) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithLabels(entries map[string]string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithAnnotations(entries map[string]string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithFinalizers(values ...string) *CertificateSigningRequestApprovalApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *CertificateSigningRequestApprovalApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *CertificateSigningRequestApprovalApplyConfiguration) WithSpec(value v1beta1.CertificateSigningRequestApprovalSpec) *CertificateSigningRequestApprovalApplyConfiguration { + b.Spec = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *CertificateSigningRequestApprovalApplyConfiguration) WithStatus(value v1beta1.CertificateSigningRequestApprovalStatus) *CertificateSigningRequestApprovalApplyConfiguration { + b.Status = &value + return b +} diff --git a/client/applyconfiguration/hypershift/v1beta1/clusterconfiguration.go b/client/applyconfiguration/hypershift/v1beta1/clusterconfiguration.go index 68ba19c39c..8f27725e4d 100644 --- a/client/applyconfiguration/hypershift/v1beta1/clusterconfiguration.go +++ b/client/applyconfiguration/hypershift/v1beta1/clusterconfiguration.go @@ -31,6 +31,7 @@ type ClusterConfigurationApplyConfiguration struct { Ingress *v1.IngressSpec `json:"ingress,omitempty"` Network *v1.NetworkSpec `json:"network,omitempty"` OAuth *v1.OAuthSpec `json:"oauth,omitempty"` + OperatorHub *v1.OperatorHubSpec `json:"operatorhub,omitempty"` Scheduler *v1.SchedulerSpec `json:"scheduler,omitempty"` Proxy *v1.ProxySpec `json:"proxy,omitempty"` } @@ -97,6 +98,14 @@ func (b *ClusterConfigurationApplyConfiguration) WithOAuth(value v1.OAuthSpec) * return b } +// WithOperatorHub sets the OperatorHub field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OperatorHub field is set to the value of the last call. +func (b *ClusterConfigurationApplyConfiguration) WithOperatorHub(value v1.OperatorHubSpec) *ClusterConfigurationApplyConfiguration { + b.OperatorHub = &value + return b +} + // WithScheduler sets the Scheduler field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Scheduler field is set to the value of the last call. 
diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go b/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go index 15dbf41835..c0b9d59312 100644 --- a/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go +++ b/client/applyconfiguration/hypershift/v1beta1/hostedclusterstatus.go @@ -19,7 +19,7 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // HostedClusterStatusApplyConfiguration represents an declarative configuration of the HostedClusterStatus type for use @@ -31,7 +31,7 @@ type HostedClusterStatusApplyConfiguration struct { IgnitionEndpoint *string `json:"ignitionEndpoint,omitempty"` ControlPlaneEndpoint *APIEndpointApplyConfiguration `json:"controlPlaneEndpoint,omitempty"` OAuthCallbackURLTemplate *string `json:"oauthCallbackURLTemplate,omitempty"` - Conditions []metav1.Condition `json:"conditions,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` Platform *PlatformStatusApplyConfiguration `json:"platform,omitempty"` } @@ -92,9 +92,12 @@ func (b *HostedClusterStatusApplyConfiguration) WithOAuthCallbackURLTemplate(val // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Conditions field. 
-func (b *HostedClusterStatusApplyConfiguration) WithConditions(values ...metav1.Condition) *HostedClusterStatusApplyConfiguration { +func (b *HostedClusterStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *HostedClusterStatusApplyConfiguration { for i := range values { - b.Conditions = append(b.Conditions, values[i]) + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) } return b } diff --git a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go index 9706367728..beec2f4fc6 100644 --- a/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go +++ b/client/applyconfiguration/hypershift/v1beta1/hostedcontrolplanestatus.go @@ -20,6 +20,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/client-go/applyconfigurations/meta/v1" ) // HostedControlPlaneStatusApplyConfiguration represents an declarative configuration of the HostedControlPlaneStatus type for use @@ -36,7 +37,7 @@ type HostedControlPlaneStatusApplyConfiguration struct { LastReleaseImageTransitionTime *v1.Time `json:"lastReleaseImageTransitionTime,omitempty"` KubeConfig *KubeconfigSecretRefApplyConfiguration `json:"kubeConfig,omitempty"` KubeadminPassword *corev1.LocalObjectReference `json:"kubeadminPassword,omitempty"` - Conditions []v1.Condition `json:"conditions,omitempty"` + Conditions []metav1.ConditionApplyConfiguration `json:"conditions,omitempty"` Platform *PlatformStatusApplyConfiguration `json:"platform,omitempty"` } @@ -137,9 +138,12 @@ func (b *HostedControlPlaneStatusApplyConfiguration) WithKubeadminPassword(value // WithConditions adds the given value to the Conditions field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. 
// If called multiple times, values provided by each call will be appended to the Conditions field. -func (b *HostedControlPlaneStatusApplyConfiguration) WithConditions(values ...v1.Condition) *HostedControlPlaneStatusApplyConfiguration { +func (b *HostedControlPlaneStatusApplyConfiguration) WithConditions(values ...*metav1.ConditionApplyConfiguration) *HostedControlPlaneStatusApplyConfiguration { for i := range values { - b.Conditions = append(b.Conditions, values[i]) + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) } return b } diff --git a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go index 089604854f..421cac736f 100644 --- a/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go +++ b/client/applyconfiguration/hypershift/v1beta1/kubevirtnodepoolplatform.go @@ -29,6 +29,7 @@ type KubevirtNodePoolPlatformApplyConfiguration struct { NetworkInterfaceMultiQueue *hypershiftv1beta1.MultiQueueSetting `json:"networkInterfaceMultiqueue,omitempty"` AdditionalNetworks []KubevirtNetworkApplyConfiguration `json:"additionalNetworks,omitempty"` AttachDefaultNetwork *bool `json:"attachDefaultNetwork,omitempty"` + NodeSelector map[string]string `json:"nodeSelector,omitempty"` } // KubevirtNodePoolPlatformApplyConfiguration constructs an declarative configuration of the KubevirtNodePoolPlatform type for use with @@ -81,3 +82,17 @@ func (b *KubevirtNodePoolPlatformApplyConfiguration) WithAttachDefaultNetwork(va b.AttachDefaultNetwork = &value return b } + +// WithNodeSelector puts the entries into the NodeSelector field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. 
+// If called multiple times, the entries provided by each call will be put on the NodeSelector field, +// overwriting an existing map entries in NodeSelector field with the same key. +func (b *KubevirtNodePoolPlatformApplyConfiguration) WithNodeSelector(entries map[string]string) *KubevirtNodePoolPlatformApplyConfiguration { + if b.NodeSelector == nil && len(entries) > 0 { + b.NodeSelector = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.NodeSelector[k] = v + } + return b +} diff --git a/client/applyconfiguration/utils.go b/client/applyconfiguration/utils.go index 5870c88c77..9253101045 100644 --- a/client/applyconfiguration/utils.go +++ b/client/applyconfiguration/utils.go @@ -18,9 +18,11 @@ limitations under the License. package applyconfiguration import ( - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" - hypershiftv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1" + certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1" + applyconfigurationhypershiftv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1alpha1" hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1" schema "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -29,179 +31,189 @@ import ( // apply configuration type exists for the given GroupVersionKind. 
func ForKind(kind schema.GroupVersionKind) interface{} { switch kind { - // Group=hypershift.openshift.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithKind("AESCBCSpec"): - return &hypershiftv1alpha1.AESCBCSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AgentNodePoolPlatform"): - return &hypershiftv1alpha1.AgentNodePoolPlatformApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AgentPlatformSpec"): - return &hypershiftv1alpha1.AgentPlatformSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("APIEndpoint"): - return &hypershiftv1alpha1.APIEndpointApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("APIServerNetworking"): - return &hypershiftv1alpha1.APIServerNetworkingApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSCloudProviderConfig"): - return &hypershiftv1alpha1.AWSCloudProviderConfigApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSKMSAuthSpec"): - return &hypershiftv1alpha1.AWSKMSAuthSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSKMSKeyEntry"): - return &hypershiftv1alpha1.AWSKMSKeyEntryApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSKMSSpec"): - return &hypershiftv1alpha1.AWSKMSSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSNodePoolPlatform"): - return &hypershiftv1alpha1.AWSNodePoolPlatformApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSPlatformSpec"): - return &hypershiftv1alpha1.AWSPlatformSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSPlatformStatus"): - return &hypershiftv1alpha1.AWSPlatformStatusApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSResourceReference"): - return &hypershiftv1alpha1.AWSResourceReferenceApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSResourceTag"): - return &hypershiftv1alpha1.AWSResourceTagApplyConfiguration{} - case 
v1alpha1.SchemeGroupVersion.WithKind("AWSRoleCredentials"): - return &hypershiftv1alpha1.AWSRoleCredentialsApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSRolesRef"): - return &hypershiftv1alpha1.AWSRolesRefApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AWSServiceEndpoint"): - return &hypershiftv1alpha1.AWSServiceEndpointApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AzureKMSSpec"): - return &hypershiftv1alpha1.AzureKMSSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AzureNodePoolPlatform"): - return &hypershiftv1alpha1.AzureNodePoolPlatformApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("AzurePlatformSpec"): - return &hypershiftv1alpha1.AzurePlatformSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ClusterAutoscaling"): - return &hypershiftv1alpha1.ClusterAutoscalingApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ClusterConfiguration"): - return &hypershiftv1alpha1.ClusterConfigurationApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ClusterNetworkEntry"): - return &hypershiftv1alpha1.ClusterNetworkEntryApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ClusterNetworking"): - return &hypershiftv1alpha1.ClusterNetworkingApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ClusterVersionStatus"): - return &hypershiftv1alpha1.ClusterVersionStatusApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("DNSSpec"): - return &hypershiftv1alpha1.DNSSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("EtcdSpec"): - return &hypershiftv1alpha1.EtcdSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("EtcdTLSConfig"): - return &hypershiftv1alpha1.EtcdTLSConfigApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("Filter"): - return &hypershiftv1alpha1.FilterApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("HostedCluster"): - return 
&hypershiftv1alpha1.HostedClusterApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("HostedClusterSpec"): - return &hypershiftv1alpha1.HostedClusterSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("HostedClusterStatus"): - return &hypershiftv1alpha1.HostedClusterStatusApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("IBMCloudKMSAuthSpec"): - return &hypershiftv1alpha1.IBMCloudKMSAuthSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("IBMCloudKMSKeyEntry"): - return &hypershiftv1alpha1.IBMCloudKMSKeyEntryApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("IBMCloudKMSSpec"): - return &hypershiftv1alpha1.IBMCloudKMSSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("IBMCloudKMSUnmanagedAuthSpec"): - return &hypershiftv1alpha1.IBMCloudKMSUnmanagedAuthSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("IBMCloudPlatformSpec"): - return &hypershiftv1alpha1.IBMCloudPlatformSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ImageContentSource"): - return &hypershiftv1alpha1.ImageContentSourceApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("InPlaceUpgrade"): - return &hypershiftv1alpha1.InPlaceUpgradeApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KMSSpec"): - return &hypershiftv1alpha1.KMSSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubeconfigSecretRef"): - return &hypershiftv1alpha1.KubeconfigSecretRefApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtCachingStrategy"): - return &hypershiftv1alpha1.KubevirtCachingStrategyApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtCompute"): - return &hypershiftv1alpha1.KubevirtComputeApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtDiskImage"): - return &hypershiftv1alpha1.KubevirtDiskImageApplyConfiguration{} - case 
v1alpha1.SchemeGroupVersion.WithKind("KubevirtManualStorageDriverConfig"): - return &hypershiftv1alpha1.KubevirtManualStorageDriverConfigApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtNetwork"): - return &hypershiftv1alpha1.KubevirtNetworkApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtNodePoolPlatform"): - return &hypershiftv1alpha1.KubevirtNodePoolPlatformApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubeVirtNodePoolStatus"): - return &hypershiftv1alpha1.KubeVirtNodePoolStatusApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtPersistentVolume"): - return &hypershiftv1alpha1.KubevirtPersistentVolumeApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtPlatformCredentials"): - return &hypershiftv1alpha1.KubevirtPlatformCredentialsApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtPlatformSpec"): - return &hypershiftv1alpha1.KubevirtPlatformSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtRootVolume"): - return &hypershiftv1alpha1.KubevirtRootVolumeApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtStorageClassMapping"): - return &hypershiftv1alpha1.KubevirtStorageClassMappingApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtStorageDriverSpec"): - return &hypershiftv1alpha1.KubevirtStorageDriverSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("KubevirtVolume"): - return &hypershiftv1alpha1.KubevirtVolumeApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("LoadBalancerPublishingStrategy"): - return &hypershiftv1alpha1.LoadBalancerPublishingStrategyApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("MachineNetworkEntry"): - return &hypershiftv1alpha1.MachineNetworkEntryApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ManagedEtcdSpec"): - return 
&hypershiftv1alpha1.ManagedEtcdSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ManagedEtcdStorageSpec"): - return &hypershiftv1alpha1.ManagedEtcdStorageSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePool"): - return &hypershiftv1alpha1.NodePoolApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePoolAutoScaling"): - return &hypershiftv1alpha1.NodePoolAutoScalingApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePoolCondition"): - return &hypershiftv1alpha1.NodePoolConditionApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePoolManagement"): - return &hypershiftv1alpha1.NodePoolManagementApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePoolPlatform"): - return &hypershiftv1alpha1.NodePoolPlatformApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePoolPlatformStatus"): - return &hypershiftv1alpha1.NodePoolPlatformStatusApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePoolSpec"): - return &hypershiftv1alpha1.NodePoolSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePoolStatus"): - return &hypershiftv1alpha1.NodePoolStatusApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("NodePortPublishingStrategy"): - return &hypershiftv1alpha1.NodePortPublishingStrategyApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PersistentVolumeEtcdStorageSpec"): - return &hypershiftv1alpha1.PersistentVolumeEtcdStorageSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PlatformSpec"): - return &hypershiftv1alpha1.PlatformSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PlatformStatus"): - return &hypershiftv1alpha1.PlatformStatusApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PowerVSNodePoolPlatform"): - return &hypershiftv1alpha1.PowerVSNodePoolPlatformApplyConfiguration{} - case 
v1alpha1.SchemeGroupVersion.WithKind("PowerVSPlatformSpec"): - return &hypershiftv1alpha1.PowerVSPlatformSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PowerVSResourceReference"): - return &hypershiftv1alpha1.PowerVSResourceReferenceApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("PowerVSVPC"): - return &hypershiftv1alpha1.PowerVSVPCApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("Release"): - return &hypershiftv1alpha1.ReleaseApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ReplaceUpgrade"): - return &hypershiftv1alpha1.ReplaceUpgradeApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("RollingUpdate"): - return &hypershiftv1alpha1.RollingUpdateApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("RoutePublishingStrategy"): - return &hypershiftv1alpha1.RoutePublishingStrategyApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("SecretEncryptionSpec"): - return &hypershiftv1alpha1.SecretEncryptionSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ServiceNetworkEntry"): - return &hypershiftv1alpha1.ServiceNetworkEntryApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ServicePublishingStrategy"): - return &hypershiftv1alpha1.ServicePublishingStrategyApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("ServicePublishingStrategyMapping"): - return &hypershiftv1alpha1.ServicePublishingStrategyMappingApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("Taint"): - return &hypershiftv1alpha1.TaintApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("UnmanagedEtcdSpec"): - return &hypershiftv1alpha1.UnmanagedEtcdSpecApplyConfiguration{} - case v1alpha1.SchemeGroupVersion.WithKind("Volume"): - return &hypershiftv1alpha1.VolumeApplyConfiguration{} + // Group=certificates.hypershift.openshift.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithKind("CertificateRevocationRequest"): + return 
&certificatesv1alpha1.CertificateRevocationRequestApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("CertificateRevocationRequestSpec"): + return &certificatesv1alpha1.CertificateRevocationRequestSpecApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("CertificateRevocationRequestStatus"): + return &certificatesv1alpha1.CertificateRevocationRequestStatusApplyConfiguration{} + case v1alpha1.SchemeGroupVersion.WithKind("CertificateSigningRequestApproval"): + return &certificatesv1alpha1.CertificateSigningRequestApprovalApplyConfiguration{} + + // Group=hypershift.openshift.io, Version=v1alpha1 + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AESCBCSpec"): + return &applyconfigurationhypershiftv1alpha1.AESCBCSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AgentNodePoolPlatform"): + return &applyconfigurationhypershiftv1alpha1.AgentNodePoolPlatformApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AgentPlatformSpec"): + return &applyconfigurationhypershiftv1alpha1.AgentPlatformSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("APIEndpoint"): + return &applyconfigurationhypershiftv1alpha1.APIEndpointApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("APIServerNetworking"): + return &applyconfigurationhypershiftv1alpha1.APIServerNetworkingApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSCloudProviderConfig"): + return &applyconfigurationhypershiftv1alpha1.AWSCloudProviderConfigApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSKMSAuthSpec"): + return &applyconfigurationhypershiftv1alpha1.AWSKMSAuthSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSKMSKeyEntry"): + return &applyconfigurationhypershiftv1alpha1.AWSKMSKeyEntryApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSKMSSpec"): + return 
&applyconfigurationhypershiftv1alpha1.AWSKMSSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSNodePoolPlatform"): + return &applyconfigurationhypershiftv1alpha1.AWSNodePoolPlatformApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSPlatformSpec"): + return &applyconfigurationhypershiftv1alpha1.AWSPlatformSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSPlatformStatus"): + return &applyconfigurationhypershiftv1alpha1.AWSPlatformStatusApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSResourceReference"): + return &applyconfigurationhypershiftv1alpha1.AWSResourceReferenceApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSResourceTag"): + return &applyconfigurationhypershiftv1alpha1.AWSResourceTagApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSRoleCredentials"): + return &applyconfigurationhypershiftv1alpha1.AWSRoleCredentialsApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSRolesRef"): + return &applyconfigurationhypershiftv1alpha1.AWSRolesRefApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AWSServiceEndpoint"): + return &applyconfigurationhypershiftv1alpha1.AWSServiceEndpointApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AzureKMSSpec"): + return &applyconfigurationhypershiftv1alpha1.AzureKMSSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AzureNodePoolPlatform"): + return &applyconfigurationhypershiftv1alpha1.AzureNodePoolPlatformApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("AzurePlatformSpec"): + return &applyconfigurationhypershiftv1alpha1.AzurePlatformSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ClusterAutoscaling"): + return &applyconfigurationhypershiftv1alpha1.ClusterAutoscalingApplyConfiguration{} + case 
hypershiftv1alpha1.SchemeGroupVersion.WithKind("ClusterConfiguration"): + return &applyconfigurationhypershiftv1alpha1.ClusterConfigurationApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ClusterNetworkEntry"): + return &applyconfigurationhypershiftv1alpha1.ClusterNetworkEntryApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ClusterNetworking"): + return &applyconfigurationhypershiftv1alpha1.ClusterNetworkingApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ClusterVersionStatus"): + return &applyconfigurationhypershiftv1alpha1.ClusterVersionStatusApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("DNSSpec"): + return &applyconfigurationhypershiftv1alpha1.DNSSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("EtcdSpec"): + return &applyconfigurationhypershiftv1alpha1.EtcdSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("EtcdTLSConfig"): + return &applyconfigurationhypershiftv1alpha1.EtcdTLSConfigApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("Filter"): + return &applyconfigurationhypershiftv1alpha1.FilterApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("HostedCluster"): + return &applyconfigurationhypershiftv1alpha1.HostedClusterApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("HostedClusterSpec"): + return &applyconfigurationhypershiftv1alpha1.HostedClusterSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("HostedClusterStatus"): + return &applyconfigurationhypershiftv1alpha1.HostedClusterStatusApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("IBMCloudKMSAuthSpec"): + return &applyconfigurationhypershiftv1alpha1.IBMCloudKMSAuthSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("IBMCloudKMSKeyEntry"): + return 
&applyconfigurationhypershiftv1alpha1.IBMCloudKMSKeyEntryApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("IBMCloudKMSSpec"): + return &applyconfigurationhypershiftv1alpha1.IBMCloudKMSSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("IBMCloudKMSUnmanagedAuthSpec"): + return &applyconfigurationhypershiftv1alpha1.IBMCloudKMSUnmanagedAuthSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("IBMCloudPlatformSpec"): + return &applyconfigurationhypershiftv1alpha1.IBMCloudPlatformSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ImageContentSource"): + return &applyconfigurationhypershiftv1alpha1.ImageContentSourceApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("InPlaceUpgrade"): + return &applyconfigurationhypershiftv1alpha1.InPlaceUpgradeApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KMSSpec"): + return &applyconfigurationhypershiftv1alpha1.KMSSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubeconfigSecretRef"): + return &applyconfigurationhypershiftv1alpha1.KubeconfigSecretRefApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtCachingStrategy"): + return &applyconfigurationhypershiftv1alpha1.KubevirtCachingStrategyApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtCompute"): + return &applyconfigurationhypershiftv1alpha1.KubevirtComputeApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtDiskImage"): + return &applyconfigurationhypershiftv1alpha1.KubevirtDiskImageApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtManualStorageDriverConfig"): + return &applyconfigurationhypershiftv1alpha1.KubevirtManualStorageDriverConfigApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtNetwork"): + return 
&applyconfigurationhypershiftv1alpha1.KubevirtNetworkApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtNodePoolPlatform"): + return &applyconfigurationhypershiftv1alpha1.KubevirtNodePoolPlatformApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubeVirtNodePoolStatus"): + return &applyconfigurationhypershiftv1alpha1.KubeVirtNodePoolStatusApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtPersistentVolume"): + return &applyconfigurationhypershiftv1alpha1.KubevirtPersistentVolumeApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtPlatformCredentials"): + return &applyconfigurationhypershiftv1alpha1.KubevirtPlatformCredentialsApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtPlatformSpec"): + return &applyconfigurationhypershiftv1alpha1.KubevirtPlatformSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtRootVolume"): + return &applyconfigurationhypershiftv1alpha1.KubevirtRootVolumeApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtStorageClassMapping"): + return &applyconfigurationhypershiftv1alpha1.KubevirtStorageClassMappingApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtStorageDriverSpec"): + return &applyconfigurationhypershiftv1alpha1.KubevirtStorageDriverSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("KubevirtVolume"): + return &applyconfigurationhypershiftv1alpha1.KubevirtVolumeApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("LoadBalancerPublishingStrategy"): + return &applyconfigurationhypershiftv1alpha1.LoadBalancerPublishingStrategyApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("MachineNetworkEntry"): + return &applyconfigurationhypershiftv1alpha1.MachineNetworkEntryApplyConfiguration{} + case 
hypershiftv1alpha1.SchemeGroupVersion.WithKind("ManagedEtcdSpec"): + return &applyconfigurationhypershiftv1alpha1.ManagedEtcdSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ManagedEtcdStorageSpec"): + return &applyconfigurationhypershiftv1alpha1.ManagedEtcdStorageSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePool"): + return &applyconfigurationhypershiftv1alpha1.NodePoolApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePoolAutoScaling"): + return &applyconfigurationhypershiftv1alpha1.NodePoolAutoScalingApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePoolCondition"): + return &applyconfigurationhypershiftv1alpha1.NodePoolConditionApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePoolManagement"): + return &applyconfigurationhypershiftv1alpha1.NodePoolManagementApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePoolPlatform"): + return &applyconfigurationhypershiftv1alpha1.NodePoolPlatformApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePoolPlatformStatus"): + return &applyconfigurationhypershiftv1alpha1.NodePoolPlatformStatusApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePoolSpec"): + return &applyconfigurationhypershiftv1alpha1.NodePoolSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePoolStatus"): + return &applyconfigurationhypershiftv1alpha1.NodePoolStatusApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("NodePortPublishingStrategy"): + return &applyconfigurationhypershiftv1alpha1.NodePortPublishingStrategyApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("PersistentVolumeEtcdStorageSpec"): + return &applyconfigurationhypershiftv1alpha1.PersistentVolumeEtcdStorageSpecApplyConfiguration{} + case 
hypershiftv1alpha1.SchemeGroupVersion.WithKind("PlatformSpec"): + return &applyconfigurationhypershiftv1alpha1.PlatformSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("PlatformStatus"): + return &applyconfigurationhypershiftv1alpha1.PlatformStatusApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("PowerVSNodePoolPlatform"): + return &applyconfigurationhypershiftv1alpha1.PowerVSNodePoolPlatformApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("PowerVSPlatformSpec"): + return &applyconfigurationhypershiftv1alpha1.PowerVSPlatformSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("PowerVSResourceReference"): + return &applyconfigurationhypershiftv1alpha1.PowerVSResourceReferenceApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("PowerVSVPC"): + return &applyconfigurationhypershiftv1alpha1.PowerVSVPCApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("Release"): + return &applyconfigurationhypershiftv1alpha1.ReleaseApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ReplaceUpgrade"): + return &applyconfigurationhypershiftv1alpha1.ReplaceUpgradeApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("RollingUpdate"): + return &applyconfigurationhypershiftv1alpha1.RollingUpdateApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("RoutePublishingStrategy"): + return &applyconfigurationhypershiftv1alpha1.RoutePublishingStrategyApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("SecretEncryptionSpec"): + return &applyconfigurationhypershiftv1alpha1.SecretEncryptionSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ServiceNetworkEntry"): + return &applyconfigurationhypershiftv1alpha1.ServiceNetworkEntryApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ServicePublishingStrategy"): + 
return &applyconfigurationhypershiftv1alpha1.ServicePublishingStrategyApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("ServicePublishingStrategyMapping"): + return &applyconfigurationhypershiftv1alpha1.ServicePublishingStrategyMappingApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("Taint"): + return &applyconfigurationhypershiftv1alpha1.TaintApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("UnmanagedEtcdSpec"): + return &applyconfigurationhypershiftv1alpha1.UnmanagedEtcdSpecApplyConfiguration{} + case hypershiftv1alpha1.SchemeGroupVersion.WithKind("Volume"): + return &applyconfigurationhypershiftv1alpha1.VolumeApplyConfiguration{} // Group=hypershift.openshift.io, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithKind("AESCBCSpec"): @@ -242,6 +254,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &hypershiftv1beta1.AzureNodePoolPlatformApplyConfiguration{} case v1beta1.SchemeGroupVersion.WithKind("AzurePlatformSpec"): return &hypershiftv1beta1.AzurePlatformSpecApplyConfiguration{} + case v1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestApproval"): + return &hypershiftv1beta1.CertificateSigningRequestApprovalApplyConfiguration{} case v1beta1.SchemeGroupVersion.WithKind("ClusterAutoscaling"): return &hypershiftv1beta1.ClusterAutoscalingApplyConfiguration{} case v1beta1.SchemeGroupVersion.WithKind("ClusterConfiguration"): diff --git a/client/clientset/clientset/clientset.go b/client/clientset/clientset/clientset.go index 0e6c429136..e4f581feae 100644 --- a/client/clientset/clientset/clientset.go +++ b/client/clientset/clientset/clientset.go @@ -21,6 +21,7 @@ import ( "fmt" "net/http" + certificatesv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1" hypershiftv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1" hypershiftv1beta1 
"github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1" discovery "k8s.io/client-go/discovery" @@ -30,6 +31,7 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface + CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface HypershiftV1alpha1() hypershiftv1alpha1.HypershiftV1alpha1Interface HypershiftV1beta1() hypershiftv1beta1.HypershiftV1beta1Interface } @@ -37,8 +39,14 @@ type Interface interface { // Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient - hypershiftV1alpha1 *hypershiftv1alpha1.HypershiftV1alpha1Client - hypershiftV1beta1 *hypershiftv1beta1.HypershiftV1beta1Client + certificatesV1alpha1 *certificatesv1alpha1.CertificatesV1alpha1Client + hypershiftV1alpha1 *hypershiftv1alpha1.HypershiftV1alpha1Client + hypershiftV1beta1 *hypershiftv1beta1.HypershiftV1beta1Client +} + +// CertificatesV1alpha1 retrieves the CertificatesV1alpha1Client +func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { + return c.certificatesV1alpha1 } // HypershiftV1alpha1 retrieves the HypershiftV1alpha1Client @@ -95,6 +103,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, var cs Clientset var err error + cs.certificatesV1alpha1, err = certificatesv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.hypershiftV1alpha1, err = hypershiftv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -124,6 +136,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
func New(c rest.Interface) *Clientset { var cs Clientset + cs.certificatesV1alpha1 = certificatesv1alpha1.New(c) cs.hypershiftV1alpha1 = hypershiftv1alpha1.New(c) cs.hypershiftV1beta1 = hypershiftv1beta1.New(c) diff --git a/client/clientset/clientset/fake/clientset_generated.go b/client/clientset/clientset/fake/clientset_generated.go index 5a8605c9ae..194f085464 100644 --- a/client/clientset/clientset/fake/clientset_generated.go +++ b/client/clientset/clientset/fake/clientset_generated.go @@ -19,6 +19,8 @@ package fake import ( clientset "github.com/openshift/hypershift/client/clientset/clientset" + certificatesv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1" + fakecertificatesv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1/fake" hypershiftv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1" fakehypershiftv1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1alpha1/fake" hypershiftv1beta1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1" @@ -80,6 +82,11 @@ var ( _ testing.FakeClient = &Clientset{} ) +// CertificatesV1alpha1 retrieves the CertificatesV1alpha1Client +func (c *Clientset) CertificatesV1alpha1() certificatesv1alpha1.CertificatesV1alpha1Interface { + return &fakecertificatesv1alpha1.FakeCertificatesV1alpha1{Fake: &c.Fake} +} + // HypershiftV1alpha1 retrieves the HypershiftV1alpha1Client func (c *Clientset) HypershiftV1alpha1() hypershiftv1alpha1.HypershiftV1alpha1Interface { return &fakehypershiftv1alpha1.FakeHypershiftV1alpha1{Fake: &c.Fake} diff --git a/client/clientset/clientset/fake/register.go b/client/clientset/clientset/fake/register.go index cf336fd1cb..056c332d88 100644 --- a/client/clientset/clientset/fake/register.go +++ b/client/clientset/clientset/fake/register.go @@ -18,6 +18,7 @@ limitations under the License. 
package fake import ( + certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,6 +32,7 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + certificatesv1alpha1.AddToScheme, hypershiftv1alpha1.AddToScheme, hypershiftv1beta1.AddToScheme, } diff --git a/client/clientset/clientset/scheme/register.go b/client/clientset/clientset/scheme/register.go index 02f80fde31..92f1e1295a 100644 --- a/client/clientset/clientset/scheme/register.go +++ b/client/clientset/clientset/scheme/register.go @@ -18,6 +18,7 @@ limitations under the License. package scheme import ( + certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,6 +32,7 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + certificatesv1alpha1.AddToScheme, hypershiftv1alpha1.AddToScheme, hypershiftv1beta1.AddToScheme, } diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go new file mode 100644 index 0000000000..b241c32c89 --- /dev/null +++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificaterevocationrequest.go @@ -0,0 +1,255 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1" + scheme "github.com/openshift/hypershift/client/clientset/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CertificateRevocationRequestsGetter has a method to return a CertificateRevocationRequestInterface. +// A group's client should implement this interface. +type CertificateRevocationRequestsGetter interface { + CertificateRevocationRequests(namespace string) CertificateRevocationRequestInterface +} + +// CertificateRevocationRequestInterface has methods to work with CertificateRevocationRequest resources. 
+type CertificateRevocationRequestInterface interface { + Create(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.CreateOptions) (*v1alpha1.CertificateRevocationRequest, error) + Update(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.UpdateOptions) (*v1alpha1.CertificateRevocationRequest, error) + UpdateStatus(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.UpdateOptions) (*v1alpha1.CertificateRevocationRequest, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CertificateRevocationRequest, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CertificateRevocationRequestList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CertificateRevocationRequest, err error) + Apply(ctx context.Context, certificateRevocationRequest *certificatesv1alpha1.CertificateRevocationRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateRevocationRequest, err error) + ApplyStatus(ctx context.Context, certificateRevocationRequest *certificatesv1alpha1.CertificateRevocationRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateRevocationRequest, err error) + CertificateRevocationRequestExpansion +} + +// certificateRevocationRequests implements CertificateRevocationRequestInterface +type certificateRevocationRequests struct { + client rest.Interface + ns string +} + +// newCertificateRevocationRequests returns a CertificateRevocationRequests +func newCertificateRevocationRequests(c *CertificatesV1alpha1Client, namespace 
string) *certificateRevocationRequests { + return &certificateRevocationRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the certificateRevocationRequest, and returns the corresponding certificateRevocationRequest object, and an error if there is any. +func (c *certificateRevocationRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + result = &v1alpha1.CertificateRevocationRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateRevocationRequests that match those selectors. +func (c *certificateRevocationRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CertificateRevocationRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.CertificateRevocationRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateRevocationRequests. +func (c *certificateRevocationRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a certificateRevocationRequest and creates it. 
Returns the server's representation of the certificateRevocationRequest, and an error, if there is any. +func (c *certificateRevocationRequests) Create(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.CreateOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + result = &v1alpha1.CertificateRevocationRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateRevocationRequest). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a certificateRevocationRequest and updates it. Returns the server's representation of the certificateRevocationRequest, and an error, if there is any. +func (c *certificateRevocationRequests) Update(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.UpdateOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + result = &v1alpha1.CertificateRevocationRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + Name(certificateRevocationRequest.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateRevocationRequest). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *certificateRevocationRequests) UpdateStatus(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.UpdateOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + result = &v1alpha1.CertificateRevocationRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + Name(certificateRevocationRequest.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Body(certificateRevocationRequest). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the certificateRevocationRequest and deletes it. Returns an error if one occurs. +func (c *certificateRevocationRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateRevocationRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched certificateRevocationRequest. +func (c *certificateRevocationRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CertificateRevocationRequest, err error) { + result = &v1alpha1.CertificateRevocationRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied certificateRevocationRequest. 
+func (c *certificateRevocationRequests) Apply(ctx context.Context, certificateRevocationRequest *certificatesv1alpha1.CertificateRevocationRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + if certificateRevocationRequest == nil { + return nil, fmt.Errorf("certificateRevocationRequest provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(certificateRevocationRequest) + if err != nil { + return nil, err + } + name := certificateRevocationRequest.Name + if name == nil { + return nil, fmt.Errorf("certificateRevocationRequest.Name must be provided to Apply") + } + result = &v1alpha1.CertificateRevocationRequest{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *certificateRevocationRequests) ApplyStatus(ctx context.Context, certificateRevocationRequest *certificatesv1alpha1.CertificateRevocationRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + if certificateRevocationRequest == nil { + return nil, fmt.Errorf("certificateRevocationRequest provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(certificateRevocationRequest) + if err != nil { + return nil, err + } + + name := certificateRevocationRequest.Name + if name == nil { + return nil, fmt.Errorf("certificateRevocationRequest.Name must be provided to Apply") + } + + result = &v1alpha1.CertificateRevocationRequest{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("certificaterevocationrequests"). + Name(*name). 
+ SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go new file mode 100644 index 0000000000..6fca6b0654 --- /dev/null +++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificates_client.go @@ -0,0 +1,111 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + "github.com/openshift/hypershift/client/clientset/clientset/scheme" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1alpha1Interface interface { + RESTClient() rest.Interface + CertificateRevocationRequestsGetter + CertificateSigningRequestApprovalsGetter +} + +// CertificatesV1alpha1Client is used to interact with features provided by the certificates.hypershift.openshift.io group. 
+type CertificatesV1alpha1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1alpha1Client) CertificateRevocationRequests(namespace string) CertificateRevocationRequestInterface { + return newCertificateRevocationRequests(c, namespace) +} + +func (c *CertificatesV1alpha1Client) CertificateSigningRequestApprovals(namespace string) CertificateSigningRequestApprovalInterface { + return newCertificateSigningRequestApprovals(c, namespace) +} + +// NewForConfig creates a new CertificatesV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new CertificatesV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*CertificatesV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &CertificatesV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *CertificatesV1alpha1Client { + return &CertificatesV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go b/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go new file mode 100644 index 0000000000..e71feed455 --- /dev/null +++ b/client/clientset/clientset/typed/certificates/v1alpha1/certificatesigningrequestapproval.go @@ -0,0 +1,255 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1" + scheme "github.com/openshift/hypershift/client/clientset/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CertificateSigningRequestApprovalsGetter has a method to return a CertificateSigningRequestApprovalInterface. +// A group's client should implement this interface. +type CertificateSigningRequestApprovalsGetter interface { + CertificateSigningRequestApprovals(namespace string) CertificateSigningRequestApprovalInterface +} + +// CertificateSigningRequestApprovalInterface has methods to work with CertificateSigningRequestApproval resources. +type CertificateSigningRequestApprovalInterface interface { + Create(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.CreateOptions) (*v1alpha1.CertificateSigningRequestApproval, error) + Update(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (*v1alpha1.CertificateSigningRequestApproval, error) + UpdateStatus(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (*v1alpha1.CertificateSigningRequestApproval, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.CertificateSigningRequestApproval, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.CertificateSigningRequestApprovalList, error) + Watch(ctx context.Context, opts 
v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CertificateSigningRequestApproval, err error) + Apply(ctx context.Context, certificateSigningRequestApproval *certificatesv1alpha1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) + ApplyStatus(ctx context.Context, certificateSigningRequestApproval *certificatesv1alpha1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) + CertificateSigningRequestApprovalExpansion +} + +// certificateSigningRequestApprovals implements CertificateSigningRequestApprovalInterface +type certificateSigningRequestApprovals struct { + client rest.Interface + ns string +} + +// newCertificateSigningRequestApprovals returns a CertificateSigningRequestApprovals +func newCertificateSigningRequestApprovals(c *CertificatesV1alpha1Client, namespace string) *certificateSigningRequestApprovals { + return &certificateSigningRequestApprovals{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the certificateSigningRequestApproval, and returns the corresponding certificateSigningRequestApproval object, and an error if there is any. +func (c *certificateSigningRequestApprovals) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + result = &v1alpha1.CertificateSigningRequestApproval{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequestApprovals that match those selectors. 
+func (c *certificateSigningRequestApprovals) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CertificateSigningRequestApprovalList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.CertificateSigningRequestApprovalList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequestApprovals. +func (c *certificateSigningRequestApprovals) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a certificateSigningRequestApproval and creates it. Returns the server's representation of the certificateSigningRequestApproval, and an error, if there is any. +func (c *certificateSigningRequestApprovals) Create(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.CreateOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + result = &v1alpha1.CertificateSigningRequestApproval{} + err = c.client.Post(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateSigningRequestApproval). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a certificateSigningRequestApproval and updates it. 
Returns the server's representation of the certificateSigningRequestApproval, and an error, if there is any. +func (c *certificateSigningRequestApprovals) Update(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + result = &v1alpha1.CertificateSigningRequestApproval{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(certificateSigningRequestApproval.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateSigningRequestApproval). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *certificateSigningRequestApprovals) UpdateStatus(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + result = &v1alpha1.CertificateSigningRequestApproval{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(certificateSigningRequestApproval.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateSigningRequestApproval). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the certificateSigningRequestApproval and deletes it. Returns an error if one occurs. +func (c *certificateSigningRequestApprovals) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *certificateSigningRequestApprovals) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched certificateSigningRequestApproval. +func (c *certificateSigningRequestApprovals) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + result = &v1alpha1.CertificateSigningRequestApproval{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequestApproval. 
+func (c *certificateSigningRequestApprovals) Apply(ctx context.Context, certificateSigningRequestApproval *certificatesv1alpha1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + if certificateSigningRequestApproval == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(certificateSigningRequestApproval) + if err != nil { + return nil, err + } + name := certificateSigningRequestApproval.Name + if name == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval.Name must be provided to Apply") + } + result = &v1alpha1.CertificateSigningRequestApproval{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *certificateSigningRequestApprovals) ApplyStatus(ctx context.Context, certificateSigningRequestApproval *certificatesv1alpha1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + if certificateSigningRequestApproval == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(certificateSigningRequestApproval) + if err != nil { + return nil, err + } + + name := certificateSigningRequestApproval.Name + if name == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval.Name must be provided to Apply") + } + + result = &v1alpha1.CertificateSigningRequestApproval{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/handlers.go b/client/clientset/clientset/typed/certificates/v1alpha1/doc.go similarity index 62% rename from vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/handlers.go rename to client/clientset/clientset/typed/certificates/v1alpha1/doc.go index cd018b97fa..0e375e4fc2 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/handlers.go +++ b/client/clientset/clientset/typed/certificates/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Kubernetes Authors. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,14 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
-package util - -import "net/http" - -// RedirectTo redirects request to a certain destination. -func RedirectTo(to string) func(http.ResponseWriter, *http.Request) { - return func(rw http.ResponseWriter, req *http.Request) { - http.Redirect(rw, req, to, http.StatusMovedPermanently) - } -} +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/url.go b/client/clientset/clientset/typed/certificates/v1alpha1/fake/doc.go similarity index 52% rename from vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/url.go rename to client/clientset/clientset/typed/certificates/v1alpha1/fake/doc.go index 9b22b23780..422564f2d5 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/url.go +++ b/client/clientset/clientset/typed/certificates/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 The Kubernetes Authors. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,25 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. -package util - -import ( - "bytes" - "fmt" - "net/url" - "strings" -) - -// PrettyPrintURL decodes the URL encoded input string and print each key=val -// pair separated by comma -func PrettyPrintURL(urlEncode string) string { - decoded, _ := url.ParseQuery(urlEncode) - var buf bytes.Buffer - for key, val := range decoded { - for _, subVal := range val { - buf.WriteString(fmt.Sprintf("%s=%s,", key, subVal)) - } - } - return strings.TrimSuffix(buf.String(), ",") -} +// Package fake has the automatically generated clients. 
+package fake diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go new file mode 100644 index 0000000000..da045ce90f --- /dev/null +++ b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificaterevocationrequest.go @@ -0,0 +1,188 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCertificateRevocationRequests implements CertificateRevocationRequestInterface +type FakeCertificateRevocationRequests struct { + Fake *FakeCertificatesV1alpha1 + ns string +} + +var certificaterevocationrequestsResource = v1alpha1.SchemeGroupVersion.WithResource("certificaterevocationrequests") + +var certificaterevocationrequestsKind = v1alpha1.SchemeGroupVersion.WithKind("CertificateRevocationRequest") + +// Get takes name of the certificateRevocationRequest, and returns the corresponding certificateRevocationRequest object, 
and an error if there is any. +func (c *FakeCertificateRevocationRequests) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(certificaterevocationrequestsResource, c.ns, name), &v1alpha1.CertificateRevocationRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateRevocationRequest), err +} + +// List takes label and field selectors, and returns the list of CertificateRevocationRequests that match those selectors. +func (c *FakeCertificateRevocationRequests) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CertificateRevocationRequestList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(certificaterevocationrequestsResource, certificaterevocationrequestsKind, c.ns, opts), &v1alpha1.CertificateRevocationRequestList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.CertificateRevocationRequestList{ListMeta: obj.(*v1alpha1.CertificateRevocationRequestList).ListMeta} + for _, item := range obj.(*v1alpha1.CertificateRevocationRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificateRevocationRequests. +func (c *FakeCertificateRevocationRequests) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(certificaterevocationrequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a certificateRevocationRequest and creates it. Returns the server's representation of the certificateRevocationRequest, and an error, if there is any. 
+func (c *FakeCertificateRevocationRequests) Create(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.CreateOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(certificaterevocationrequestsResource, c.ns, certificateRevocationRequest), &v1alpha1.CertificateRevocationRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateRevocationRequest), err +} + +// Update takes the representation of a certificateRevocationRequest and updates it. Returns the server's representation of the certificateRevocationRequest, and an error, if there is any. +func (c *FakeCertificateRevocationRequests) Update(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.UpdateOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(certificaterevocationrequestsResource, c.ns, certificateRevocationRequest), &v1alpha1.CertificateRevocationRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateRevocationRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCertificateRevocationRequests) UpdateStatus(ctx context.Context, certificateRevocationRequest *v1alpha1.CertificateRevocationRequest, opts v1.UpdateOptions) (*v1alpha1.CertificateRevocationRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(certificaterevocationrequestsResource, "status", c.ns, certificateRevocationRequest), &v1alpha1.CertificateRevocationRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateRevocationRequest), err +} + +// Delete takes name of the certificateRevocationRequest and deletes it. Returns an error if one occurs. 
+func (c *FakeCertificateRevocationRequests) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(certificaterevocationrequestsResource, c.ns, name, opts), &v1alpha1.CertificateRevocationRequest{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeCertificateRevocationRequests) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(certificaterevocationrequestsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.CertificateRevocationRequestList{}) + return err +} + +// Patch applies the patch and returns the patched certificateRevocationRequest. +func (c *FakeCertificateRevocationRequests) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CertificateRevocationRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(certificaterevocationrequestsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CertificateRevocationRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateRevocationRequest), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied certificateRevocationRequest. 
+func (c *FakeCertificateRevocationRequests) Apply(ctx context.Context, certificateRevocationRequest *certificatesv1alpha1.CertificateRevocationRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + if certificateRevocationRequest == nil { + return nil, fmt.Errorf("certificateRevocationRequest provided to Apply must not be nil") + } + data, err := json.Marshal(certificateRevocationRequest) + if err != nil { + return nil, err + } + name := certificateRevocationRequest.Name + if name == nil { + return nil, fmt.Errorf("certificateRevocationRequest.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(certificaterevocationrequestsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.CertificateRevocationRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateRevocationRequest), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeCertificateRevocationRequests) ApplyStatus(ctx context.Context, certificateRevocationRequest *certificatesv1alpha1.CertificateRevocationRequestApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateRevocationRequest, err error) { + if certificateRevocationRequest == nil { + return nil, fmt.Errorf("certificateRevocationRequest provided to Apply must not be nil") + } + data, err := json.Marshal(certificateRevocationRequest) + if err != nil { + return nil, err + } + name := certificateRevocationRequest.Name + if name == nil { + return nil, fmt.Errorf("certificateRevocationRequest.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificaterevocationrequestsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha1.CertificateRevocationRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateRevocationRequest), err +} diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificates_client.go b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificates_client.go new file mode 100644 index 0000000000..0ce3042171 --- /dev/null +++ b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificates_client.go @@ -0,0 +1,43 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1alpha1 "github.com/openshift/hypershift/client/clientset/clientset/typed/certificates/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeCertificatesV1alpha1 struct { + *testing.Fake +} + +func (c *FakeCertificatesV1alpha1) CertificateRevocationRequests(namespace string) v1alpha1.CertificateRevocationRequestInterface { + return &FakeCertificateRevocationRequests{c, namespace} +} + +func (c *FakeCertificatesV1alpha1) CertificateSigningRequestApprovals(namespace string) v1alpha1.CertificateSigningRequestApprovalInterface { + return &FakeCertificateSigningRequestApprovals{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeCertificatesV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go new file mode 100644 index 0000000000..fd365c96c3 --- /dev/null +++ b/client/clientset/clientset/typed/certificates/v1alpha1/fake/fake_certificatesigningrequestapproval.go @@ -0,0 +1,188 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + certificatesv1alpha1 "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCertificateSigningRequestApprovals implements CertificateSigningRequestApprovalInterface +type FakeCertificateSigningRequestApprovals struct { + Fake *FakeCertificatesV1alpha1 + ns string +} + +var certificatesigningrequestapprovalsResource = v1alpha1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals") + +var certificatesigningrequestapprovalsKind = v1alpha1.SchemeGroupVersion.WithKind("CertificateSigningRequestApproval") + +// Get takes name of the certificateSigningRequestApproval, and returns the corresponding certificateSigningRequestApproval object, and an error if there is any. +func (c *FakeCertificateSigningRequestApprovals) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(certificatesigningrequestapprovalsResource, c.ns, name), &v1alpha1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequestApproval), err +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequestApprovals that match those selectors. +func (c *FakeCertificateSigningRequestApprovals) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.CertificateSigningRequestApprovalList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(certificatesigningrequestapprovalsResource, certificatesigningrequestapprovalsKind, c.ns, opts), &v1alpha1.CertificateSigningRequestApprovalList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.CertificateSigningRequestApprovalList{ListMeta: obj.(*v1alpha1.CertificateSigningRequestApprovalList).ListMeta} + for _, item := range obj.(*v1alpha1.CertificateSigningRequestApprovalList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequestApprovals. +func (c *FakeCertificateSigningRequestApprovals) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(certificatesigningrequestapprovalsResource, c.ns, opts)) + +} + +// Create takes the representation of a certificateSigningRequestApproval and creates it. Returns the server's representation of the certificateSigningRequestApproval, and an error, if there is any. +func (c *FakeCertificateSigningRequestApprovals) Create(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.CreateOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(certificatesigningrequestapprovalsResource, c.ns, certificateSigningRequestApproval), &v1alpha1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequestApproval), err +} + +// Update takes the representation of a certificateSigningRequestApproval and updates it. Returns the server's representation of the certificateSigningRequestApproval, and an error, if there is any. 
+func (c *FakeCertificateSigningRequestApprovals) Update(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(certificatesigningrequestapprovalsResource, c.ns, certificateSigningRequestApproval), &v1alpha1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequestApproval), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCertificateSigningRequestApprovals) UpdateStatus(ctx context.Context, certificateSigningRequestApproval *v1alpha1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (*v1alpha1.CertificateSigningRequestApproval, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(certificatesigningrequestapprovalsResource, "status", c.ns, certificateSigningRequestApproval), &v1alpha1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequestApproval), err +} + +// Delete takes name of the certificateSigningRequestApproval and deletes it. Returns an error if one occurs. +func (c *FakeCertificateSigningRequestApprovals) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(certificatesigningrequestapprovalsResource, c.ns, name, opts), &v1alpha1.CertificateSigningRequestApproval{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeCertificateSigningRequestApprovals) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(certificatesigningrequestapprovalsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.CertificateSigningRequestApprovalList{}) + return err +} + +// Patch applies the patch and returns the patched certificateSigningRequestApproval. +func (c *FakeCertificateSigningRequestApprovals) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(certificatesigningrequestapprovalsResource, c.ns, name, pt, data, subresources...), &v1alpha1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequestApproval), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequestApproval. +func (c *FakeCertificateSigningRequestApprovals) Apply(ctx context.Context, certificateSigningRequestApproval *certificatesv1alpha1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + if certificateSigningRequestApproval == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval provided to Apply must not be nil") + } + data, err := json.Marshal(certificateSigningRequestApproval) + if err != nil { + return nil, err + } + name := certificateSigningRequestApproval.Name + if name == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificatesigningrequestapprovalsResource, c.ns, *name, types.ApplyPatchType, data), &v1alpha1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequestApproval), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeCertificateSigningRequestApprovals) ApplyStatus(ctx context.Context, certificateSigningRequestApproval *certificatesv1alpha1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.CertificateSigningRequestApproval, err error) { + if certificateSigningRequestApproval == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval provided to Apply must not be nil") + } + data, err := json.Marshal(certificateSigningRequestApproval) + if err != nil { + return nil, err + } + name := certificateSigningRequestApproval.Name + if name == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificatesigningrequestapprovalsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1alpha1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.CertificateSigningRequestApproval), err +} diff --git a/vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/flags.go b/client/clientset/clientset/typed/certificates/v1alpha1/generated_expansion.go similarity index 65% rename from vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/flags.go rename to client/clientset/clientset/typed/certificates/v1alpha1/generated_expansion.go index ff21385b2b..0cbdc6d3c8 100644 --- a/vendor/sigs.k8s.io/apiserver-network-proxy/pkg/util/flags.go +++ b/client/clientset/clientset/typed/certificates/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,13 +13,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by client-gen. DO NOT EDIT. 
-package util +package v1alpha1 -import "strings" +type CertificateRevocationRequestExpansion interface{} -// Normalize replaces underscores with hyphens -// we should always use hyphens instead of underscores when registering component flags -func Normalize(s string) string { - return strings.Replace(s, "_", "-", -1) -} +type CertificateSigningRequestApprovalExpansion interface{} diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/certificatesigningrequestapproval.go b/client/clientset/clientset/typed/hypershift/v1beta1/certificatesigningrequestapproval.go new file mode 100644 index 0000000000..313ecdd31c --- /dev/null +++ b/client/clientset/clientset/typed/hypershift/v1beta1/certificatesigningrequestapproval.go @@ -0,0 +1,255 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1" + scheme "github.com/openshift/hypershift/client/clientset/clientset/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// CertificateSigningRequestApprovalsGetter has a method to return a CertificateSigningRequestApprovalInterface. 
+// A group's client should implement this interface. +type CertificateSigningRequestApprovalsGetter interface { + CertificateSigningRequestApprovals(namespace string) CertificateSigningRequestApprovalInterface +} + +// CertificateSigningRequestApprovalInterface has methods to work with CertificateSigningRequestApproval resources. +type CertificateSigningRequestApprovalInterface interface { + Create(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.CreateOptions) (*v1beta1.CertificateSigningRequestApproval, error) + Update(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequestApproval, error) + UpdateStatus(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequestApproval, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.CertificateSigningRequestApproval, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.CertificateSigningRequestApprovalList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequestApproval, err error) + Apply(ctx context.Context, certificateSigningRequestApproval *hypershiftv1beta1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) + ApplyStatus(ctx context.Context, certificateSigningRequestApproval *hypershiftv1beta1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result 
*v1beta1.CertificateSigningRequestApproval, err error) + CertificateSigningRequestApprovalExpansion +} + +// certificateSigningRequestApprovals implements CertificateSigningRequestApprovalInterface +type certificateSigningRequestApprovals struct { + client rest.Interface + ns string +} + +// newCertificateSigningRequestApprovals returns a CertificateSigningRequestApprovals +func newCertificateSigningRequestApprovals(c *HypershiftV1beta1Client, namespace string) *certificateSigningRequestApprovals { + return &certificateSigningRequestApprovals{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the certificateSigningRequestApproval, and returns the corresponding certificateSigningRequestApproval object, and an error if there is any. +func (c *certificateSigningRequestApprovals) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + result = &v1beta1.CertificateSigningRequestApproval{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequestApprovals that match those selectors. +func (c *certificateSigningRequestApprovals) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestApprovalList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CertificateSigningRequestApprovalList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequestApprovals. 
+func (c *certificateSigningRequestApprovals) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a certificateSigningRequestApproval and creates it. Returns the server's representation of the certificateSigningRequestApproval, and an error, if there is any. +func (c *certificateSigningRequestApprovals) Create(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + result = &v1beta1.CertificateSigningRequestApproval{} + err = c.client.Post(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateSigningRequestApproval). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a certificateSigningRequestApproval and updates it. Returns the server's representation of the certificateSigningRequestApproval, and an error, if there is any. +func (c *certificateSigningRequestApprovals) Update(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + result = &v1beta1.CertificateSigningRequestApproval{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(certificateSigningRequestApproval.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateSigningRequestApproval). + Do(ctx). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *certificateSigningRequestApprovals) UpdateStatus(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + result = &v1beta1.CertificateSigningRequestApproval{} + err = c.client.Put(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(certificateSigningRequestApproval.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(certificateSigningRequestApproval). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the certificateSigningRequestApproval and deletes it. Returns an error if one occurs. +func (c *certificateSigningRequestApprovals) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateSigningRequestApprovals) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched certificateSigningRequestApproval. 
+func (c *certificateSigningRequestApprovals) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequestApproval, err error) { + result = &v1beta1.CertificateSigningRequestApproval{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequestApproval. +func (c *certificateSigningRequestApprovals) Apply(ctx context.Context, certificateSigningRequestApproval *hypershiftv1beta1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + if certificateSigningRequestApproval == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(certificateSigningRequestApproval) + if err != nil { + return nil, err + } + name := certificateSigningRequestApproval.Name + if name == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval.Name must be provided to Apply") + } + result = &v1beta1.CertificateSigningRequestApproval{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
+func (c *certificateSigningRequestApprovals) ApplyStatus(ctx context.Context, certificateSigningRequestApproval *hypershiftv1beta1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + if certificateSigningRequestApproval == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(certificateSigningRequestApproval) + if err != nil { + return nil, err + } + + name := certificateSigningRequestApproval.Name + if name == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval.Name must be provided to Apply") + } + + result = &v1beta1.CertificateSigningRequestApproval{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("certificatesigningrequestapprovals"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go new file mode 100644 index 0000000000..6d9e62e34d --- /dev/null +++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_certificatesigningrequestapproval.go @@ -0,0 +1,188 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + hypershiftv1beta1 "github.com/openshift/hypershift/client/applyconfiguration/hypershift/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeCertificateSigningRequestApprovals implements CertificateSigningRequestApprovalInterface +type FakeCertificateSigningRequestApprovals struct { + Fake *FakeHypershiftV1beta1 + ns string +} + +var certificatesigningrequestapprovalsResource = v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals") + +var certificatesigningrequestapprovalsKind = v1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequestApproval") + +// Get takes name of the certificateSigningRequestApproval, and returns the corresponding certificateSigningRequestApproval object, and an error if there is any. +func (c *FakeCertificateSigningRequestApprovals) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(certificatesigningrequestapprovalsResource, c.ns, name), &v1beta1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequestApproval), err +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequestApprovals that match those selectors. +func (c *FakeCertificateSigningRequestApprovals) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestApprovalList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(certificatesigningrequestapprovalsResource, certificatesigningrequestapprovalsKind, c.ns, opts), &v1beta1.CertificateSigningRequestApprovalList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.CertificateSigningRequestApprovalList{ListMeta: obj.(*v1beta1.CertificateSigningRequestApprovalList).ListMeta} + for _, item := range obj.(*v1beta1.CertificateSigningRequestApprovalList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequestApprovals. +func (c *FakeCertificateSigningRequestApprovals) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(certificatesigningrequestapprovalsResource, c.ns, opts)) + +} + +// Create takes the representation of a certificateSigningRequestApproval and creates it. Returns the server's representation of the certificateSigningRequestApproval, and an error, if there is any. +func (c *FakeCertificateSigningRequestApprovals) Create(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.CreateOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(certificatesigningrequestapprovalsResource, c.ns, certificateSigningRequestApproval), &v1beta1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequestApproval), err +} + +// Update takes the representation of a certificateSigningRequestApproval and updates it. Returns the server's representation of the certificateSigningRequestApproval, and an error, if there is any. 
+func (c *FakeCertificateSigningRequestApprovals) Update(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(certificatesigningrequestapprovalsResource, c.ns, certificateSigningRequestApproval), &v1beta1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequestApproval), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeCertificateSigningRequestApprovals) UpdateStatus(ctx context.Context, certificateSigningRequestApproval *v1beta1.CertificateSigningRequestApproval, opts v1.UpdateOptions) (*v1beta1.CertificateSigningRequestApproval, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(certificatesigningrequestapprovalsResource, "status", c.ns, certificateSigningRequestApproval), &v1beta1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequestApproval), err +} + +// Delete takes name of the certificateSigningRequestApproval and deletes it. Returns an error if one occurs. +func (c *FakeCertificateSigningRequestApprovals) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(certificatesigningrequestapprovalsResource, c.ns, name, opts), &v1beta1.CertificateSigningRequestApproval{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeCertificateSigningRequestApprovals) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(certificatesigningrequestapprovalsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.CertificateSigningRequestApprovalList{}) + return err +} + +// Patch applies the patch and returns the patched certificateSigningRequestApproval. +func (c *FakeCertificateSigningRequestApprovals) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.CertificateSigningRequestApproval, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(certificatesigningrequestapprovalsResource, c.ns, name, pt, data, subresources...), &v1beta1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequestApproval), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied certificateSigningRequestApproval. +func (c *FakeCertificateSigningRequestApprovals) Apply(ctx context.Context, certificateSigningRequestApproval *hypershiftv1beta1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + if certificateSigningRequestApproval == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval provided to Apply must not be nil") + } + data, err := json.Marshal(certificateSigningRequestApproval) + if err != nil { + return nil, err + } + name := certificateSigningRequestApproval.Name + if name == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificatesigningrequestapprovalsResource, c.ns, *name, types.ApplyPatchType, data), &v1beta1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequestApproval), err +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *FakeCertificateSigningRequestApprovals) ApplyStatus(ctx context.Context, certificateSigningRequestApproval *hypershiftv1beta1.CertificateSigningRequestApprovalApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.CertificateSigningRequestApproval, err error) { + if certificateSigningRequestApproval == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval provided to Apply must not be nil") + } + data, err := json.Marshal(certificateSigningRequestApproval) + if err != nil { + return nil, err + } + name := certificateSigningRequestApproval.Name + if name == nil { + return nil, fmt.Errorf("certificateSigningRequestApproval.Name must be provided to Apply") + } + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(certificatesigningrequestapprovalsResource, c.ns, *name, types.ApplyPatchType, data, "status"), &v1beta1.CertificateSigningRequestApproval{}) + + if obj == nil { + return nil, err + } + return obj.(*v1beta1.CertificateSigningRequestApproval), err +} diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hypershift_client.go b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hypershift_client.go index 6d0b9d23a2..c5d32a5901 100644 --- a/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hypershift_client.go +++ b/client/clientset/clientset/typed/hypershift/v1beta1/fake/fake_hypershift_client.go @@ -27,6 +27,10 @@ type FakeHypershiftV1beta1 struct { *testing.Fake } +func (c *FakeHypershiftV1beta1) CertificateSigningRequestApprovals(namespace string) v1beta1.CertificateSigningRequestApprovalInterface { + return &FakeCertificateSigningRequestApprovals{c, namespace} +} + func (c *FakeHypershiftV1beta1) HostedClusters(namespace string) v1beta1.HostedClusterInterface { return &FakeHostedClusters{c, namespace} } diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/generated_expansion.go b/client/clientset/clientset/typed/hypershift/v1beta1/generated_expansion.go index 29c529638c..33cbdad7ac 100644 --- a/client/clientset/clientset/typed/hypershift/v1beta1/generated_expansion.go +++ b/client/clientset/clientset/typed/hypershift/v1beta1/generated_expansion.go @@ -17,6 +17,8 @@ limitations under the License. 
package v1beta1 +type CertificateSigningRequestApprovalExpansion interface{} + type HostedClusterExpansion interface{} type HostedControlPlaneExpansion interface{} diff --git a/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go b/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go index dfcdaf022e..7ce2da0f94 100644 --- a/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go +++ b/client/clientset/clientset/typed/hypershift/v1beta1/hypershift_client.go @@ -27,6 +27,7 @@ import ( type HypershiftV1beta1Interface interface { RESTClient() rest.Interface + CertificateSigningRequestApprovalsGetter HostedClustersGetter HostedControlPlanesGetter NodePoolsGetter @@ -37,6 +38,10 @@ type HypershiftV1beta1Client struct { restClient rest.Interface } +func (c *HypershiftV1beta1Client) CertificateSigningRequestApprovals(namespace string) CertificateSigningRequestApprovalInterface { + return newCertificateSigningRequestApprovals(c, namespace) +} + func (c *HypershiftV1beta1Client) HostedClusters(namespace string) HostedClusterInterface { return newHostedClusters(c, namespace) } diff --git a/client/informers/externalversions/certificates/interface.go b/client/informers/externalversions/certificates/interface.go new file mode 100644 index 0000000000..d7cb923ba1 --- /dev/null +++ b/client/informers/externalversions/certificates/interface.go @@ -0,0 +1,45 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// Code generated by informer-gen. DO NOT EDIT. + +package certificates + +import ( + v1alpha1 "github.com/openshift/hypershift/client/informers/externalversions/certificates/v1alpha1" + internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go b/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go new file mode 100644 index 0000000000..b79ab1e28e --- /dev/null +++ b/client/informers/externalversions/certificates/v1alpha1/certificaterevocationrequest.go @@ -0,0 +1,89 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + clientset "github.com/openshift/hypershift/client/clientset/clientset" + internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/openshift/hypershift/client/listers/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CertificateRevocationRequestInformer provides access to a shared informer and lister for +// CertificateRevocationRequests. +type CertificateRevocationRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.CertificateRevocationRequestLister +} + +type certificateRevocationRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCertificateRevocationRequestInformer constructs a new informer for CertificateRevocationRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateRevocationRequestInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateRevocationRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateRevocationRequestInformer constructs a new informer for CertificateRevocationRequest type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateRevocationRequestInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1alpha1().CertificateRevocationRequests(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1alpha1().CertificateRevocationRequests(namespace).Watch(context.TODO(), options) + }, + }, + &certificatesv1alpha1.CertificateRevocationRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateRevocationRequestInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateRevocationRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateRevocationRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certificatesv1alpha1.CertificateRevocationRequest{}, f.defaultInformer) +} + +func (f *certificateRevocationRequestInformer) Lister() v1alpha1.CertificateRevocationRequestLister { + return v1alpha1.NewCertificateRevocationRequestLister(f.Informer().GetIndexer()) +} diff --git a/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go b/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go new 
file mode 100644 index 0000000000..98eda861a8 --- /dev/null +++ b/client/informers/externalversions/certificates/v1alpha1/certificatesigningrequestapproval.go @@ -0,0 +1,89 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + clientset "github.com/openshift/hypershift/client/clientset/clientset" + internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/openshift/hypershift/client/listers/certificates/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CertificateSigningRequestApprovalInformer provides access to a shared informer and lister for +// CertificateSigningRequestApprovals. +type CertificateSigningRequestApprovalInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.CertificateSigningRequestApprovalLister +} + +type certificateSigningRequestApprovalInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCertificateSigningRequestApprovalInformer constructs a new informer for CertificateSigningRequestApproval type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateSigningRequestApprovalInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestApprovalInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateSigningRequestApprovalInformer constructs a new informer for CertificateSigningRequestApproval type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateSigningRequestApprovalInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1alpha1().CertificateSigningRequestApprovals(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1alpha1().CertificateSigningRequestApprovals(namespace).Watch(context.TODO(), options) + }, + }, + &certificatesv1alpha1.CertificateSigningRequestApproval{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateSigningRequestApprovalInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestApprovalInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: 
cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateSigningRequestApprovalInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certificatesv1alpha1.CertificateSigningRequestApproval{}, f.defaultInformer) +} + +func (f *certificateSigningRequestApprovalInformer) Lister() v1alpha1.CertificateSigningRequestApprovalLister { + return v1alpha1.NewCertificateSigningRequestApprovalLister(f.Informer().GetIndexer()) +} diff --git a/client/informers/externalversions/certificates/v1alpha1/interface.go b/client/informers/externalversions/certificates/v1alpha1/interface.go new file mode 100644 index 0000000000..0533d93761 --- /dev/null +++ b/client/informers/externalversions/certificates/v1alpha1/interface.go @@ -0,0 +1,51 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CertificateRevocationRequests returns a CertificateRevocationRequestInformer. + CertificateRevocationRequests() CertificateRevocationRequestInformer + // CertificateSigningRequestApprovals returns a CertificateSigningRequestApprovalInformer. 
+ CertificateSigningRequestApprovals() CertificateSigningRequestApprovalInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CertificateRevocationRequests returns a CertificateRevocationRequestInformer. +func (v *version) CertificateRevocationRequests() CertificateRevocationRequestInformer { + return &certificateRevocationRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// CertificateSigningRequestApprovals returns a CertificateSigningRequestApprovalInformer. +func (v *version) CertificateSigningRequestApprovals() CertificateSigningRequestApprovalInformer { + return &certificateSigningRequestApprovalInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/client/informers/externalversions/factory.go b/client/informers/externalversions/factory.go index 65084231e8..a7d28229b8 100644 --- a/client/informers/externalversions/factory.go +++ b/client/informers/externalversions/factory.go @@ -23,6 +23,7 @@ import ( time "time" clientset "github.com/openshift/hypershift/client/clientset/clientset" + certificates "github.com/openshift/hypershift/client/informers/externalversions/certificates" hypershift "github.com/openshift/hypershift/client/informers/externalversions/hypershift" internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -252,9 +253,14 @@ type SharedInformerFactory interface { // client. 
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + Certificates() certificates.Interface Hypershift() hypershift.Interface } +func (f *sharedInformerFactory) Certificates() certificates.Interface { + return certificates.New(f, f.namespace, f.tweakListOptions) +} + func (f *sharedInformerFactory) Hypershift() hypershift.Interface { return hypershift.New(f, f.namespace, f.tweakListOptions) } diff --git a/client/informers/externalversions/generic.go b/client/informers/externalversions/generic.go index 2c001b6698..4d00511a41 100644 --- a/client/informers/externalversions/generic.go +++ b/client/informers/externalversions/generic.go @@ -20,7 +20,8 @@ package externalversions import ( "fmt" - v1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + hypershiftv1alpha1 "github.com/openshift/hypershift/api/hypershift/v1alpha1" v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" @@ -52,13 +53,21 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=hypershift.openshift.io, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("hostedclusters"): + // Group=certificates.hypershift.openshift.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("certificaterevocationrequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1alpha1().CertificateRevocationRequests().Informer()}, nil + case v1alpha1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals"): + return &genericInformer{resource: resource.GroupResource(), informer: 
f.Certificates().V1alpha1().CertificateSigningRequestApprovals().Informer()}, nil + + // Group=hypershift.openshift.io, Version=v1alpha1 + case hypershiftv1alpha1.SchemeGroupVersion.WithResource("hostedclusters"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hypershift().V1alpha1().HostedClusters().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("nodepools"): + case hypershiftv1alpha1.SchemeGroupVersion.WithResource("nodepools"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hypershift().V1alpha1().NodePools().Informer()}, nil // Group=hypershift.openshift.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Hypershift().V1beta1().CertificateSigningRequestApprovals().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("hostedclusters"): return &genericInformer{resource: resource.GroupResource(), informer: f.Hypershift().V1beta1().HostedClusters().Informer()}, nil case v1beta1.SchemeGroupVersion.WithResource("hostedcontrolplanes"): diff --git a/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go b/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go new file mode 100644 index 0000000000..bd82a48460 --- /dev/null +++ b/client/informers/externalversions/hypershift/v1beta1/certificatesigningrequestapproval.go @@ -0,0 +1,89 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + time "time" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + clientset "github.com/openshift/hypershift/client/clientset/clientset" + internalinterfaces "github.com/openshift/hypershift/client/informers/externalversions/internalinterfaces" + v1beta1 "github.com/openshift/hypershift/client/listers/hypershift/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CertificateSigningRequestApprovalInformer provides access to a shared informer and lister for +// CertificateSigningRequestApprovals. +type CertificateSigningRequestApprovalInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CertificateSigningRequestApprovalLister +} + +type certificateSigningRequestApprovalInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCertificateSigningRequestApprovalInformer constructs a new informer for CertificateSigningRequestApproval type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateSigningRequestApprovalInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestApprovalInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateSigningRequestApprovalInformer constructs a new informer for CertificateSigningRequestApproval type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateSigningRequestApprovalInformer(client clientset.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HypershiftV1beta1().CertificateSigningRequestApprovals(namespace).List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.HypershiftV1beta1().CertificateSigningRequestApprovals(namespace).Watch(context.TODO(), options) + }, + }, + &hypershiftv1beta1.CertificateSigningRequestApproval{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateSigningRequestApprovalInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestApprovalInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateSigningRequestApprovalInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&hypershiftv1beta1.CertificateSigningRequestApproval{}, f.defaultInformer) +} + +func (f *certificateSigningRequestApprovalInformer) Lister() v1beta1.CertificateSigningRequestApprovalLister { + return v1beta1.NewCertificateSigningRequestApprovalLister(f.Informer().GetIndexer()) +} diff --git a/client/informers/externalversions/hypershift/v1beta1/interface.go b/client/informers/externalversions/hypershift/v1beta1/interface.go index 
098e82000f..9178b91e24 100644 --- a/client/informers/externalversions/hypershift/v1beta1/interface.go +++ b/client/informers/externalversions/hypershift/v1beta1/interface.go @@ -23,6 +23,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { + // CertificateSigningRequestApprovals returns a CertificateSigningRequestApprovalInformer. + CertificateSigningRequestApprovals() CertificateSigningRequestApprovalInformer // HostedClusters returns a HostedClusterInformer. HostedClusters() HostedClusterInformer // HostedControlPlanes returns a HostedControlPlaneInformer. @@ -42,6 +44,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// CertificateSigningRequestApprovals returns a CertificateSigningRequestApprovalInformer. +func (v *version) CertificateSigningRequestApprovals() CertificateSigningRequestApprovalInformer { + return &certificateSigningRequestApprovalInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // HostedClusters returns a HostedClusterInformer. func (v *version) HostedClusters() HostedClusterInformer { return &hostedClusterInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/client/listers/certificates/v1alpha1/certificaterevocationrequest.go b/client/listers/certificates/v1alpha1/certificaterevocationrequest.go new file mode 100644 index 0000000000..fcade5c51d --- /dev/null +++ b/client/listers/certificates/v1alpha1/certificaterevocationrequest.go @@ -0,0 +1,98 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateRevocationRequestLister helps list CertificateRevocationRequests. +// All objects returned here must be treated as read-only. +type CertificateRevocationRequestLister interface { + // List lists all CertificateRevocationRequests in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.CertificateRevocationRequest, err error) + // CertificateRevocationRequests returns an object that can list and get CertificateRevocationRequests. + CertificateRevocationRequests(namespace string) CertificateRevocationRequestNamespaceLister + CertificateRevocationRequestListerExpansion +} + +// certificateRevocationRequestLister implements the CertificateRevocationRequestLister interface. +type certificateRevocationRequestLister struct { + indexer cache.Indexer +} + +// NewCertificateRevocationRequestLister returns a new CertificateRevocationRequestLister. +func NewCertificateRevocationRequestLister(indexer cache.Indexer) CertificateRevocationRequestLister { + return &certificateRevocationRequestLister{indexer: indexer} +} + +// List lists all CertificateRevocationRequests in the indexer. 
+func (s *certificateRevocationRequestLister) List(selector labels.Selector) (ret []*v1alpha1.CertificateRevocationRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.CertificateRevocationRequest)) + }) + return ret, err +} + +// CertificateRevocationRequests returns an object that can list and get CertificateRevocationRequests. +func (s *certificateRevocationRequestLister) CertificateRevocationRequests(namespace string) CertificateRevocationRequestNamespaceLister { + return certificateRevocationRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CertificateRevocationRequestNamespaceLister helps list and get CertificateRevocationRequests. +// All objects returned here must be treated as read-only. +type CertificateRevocationRequestNamespaceLister interface { + // List lists all CertificateRevocationRequests in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.CertificateRevocationRequest, err error) + // Get retrieves the CertificateRevocationRequest from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.CertificateRevocationRequest, error) + CertificateRevocationRequestNamespaceListerExpansion +} + +// certificateRevocationRequestNamespaceLister implements the CertificateRevocationRequestNamespaceLister +// interface. +type certificateRevocationRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CertificateRevocationRequests in the indexer for a given namespace. 
+func (s certificateRevocationRequestNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CertificateRevocationRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.CertificateRevocationRequest)) + }) + return ret, err +} + +// Get retrieves the CertificateRevocationRequest from the indexer for a given namespace and name. +func (s certificateRevocationRequestNamespaceLister) Get(name string) (*v1alpha1.CertificateRevocationRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("certificaterevocationrequest"), name) + } + return obj.(*v1alpha1.CertificateRevocationRequest), nil +} diff --git a/client/listers/certificates/v1alpha1/certificatesigningrequestapproval.go b/client/listers/certificates/v1alpha1/certificatesigningrequestapproval.go new file mode 100644 index 0000000000..97f0325de1 --- /dev/null +++ b/client/listers/certificates/v1alpha1/certificatesigningrequestapproval.go @@ -0,0 +1,98 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateSigningRequestApprovalLister helps list CertificateSigningRequestApprovals. +// All objects returned here must be treated as read-only. +type CertificateSigningRequestApprovalLister interface { + // List lists all CertificateSigningRequestApprovals in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.CertificateSigningRequestApproval, err error) + // CertificateSigningRequestApprovals returns an object that can list and get CertificateSigningRequestApprovals. + CertificateSigningRequestApprovals(namespace string) CertificateSigningRequestApprovalNamespaceLister + CertificateSigningRequestApprovalListerExpansion +} + +// certificateSigningRequestApprovalLister implements the CertificateSigningRequestApprovalLister interface. +type certificateSigningRequestApprovalLister struct { + indexer cache.Indexer +} + +// NewCertificateSigningRequestApprovalLister returns a new CertificateSigningRequestApprovalLister. +func NewCertificateSigningRequestApprovalLister(indexer cache.Indexer) CertificateSigningRequestApprovalLister { + return &certificateSigningRequestApprovalLister{indexer: indexer} +} + +// List lists all CertificateSigningRequestApprovals in the indexer. +func (s *certificateSigningRequestApprovalLister) List(selector labels.Selector) (ret []*v1alpha1.CertificateSigningRequestApproval, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.CertificateSigningRequestApproval)) + }) + return ret, err +} + +// CertificateSigningRequestApprovals returns an object that can list and get CertificateSigningRequestApprovals. 
+func (s *certificateSigningRequestApprovalLister) CertificateSigningRequestApprovals(namespace string) CertificateSigningRequestApprovalNamespaceLister { + return certificateSigningRequestApprovalNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CertificateSigningRequestApprovalNamespaceLister helps list and get CertificateSigningRequestApprovals. +// All objects returned here must be treated as read-only. +type CertificateSigningRequestApprovalNamespaceLister interface { + // List lists all CertificateSigningRequestApprovals in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.CertificateSigningRequestApproval, err error) + // Get retrieves the CertificateSigningRequestApproval from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.CertificateSigningRequestApproval, error) + CertificateSigningRequestApprovalNamespaceListerExpansion +} + +// certificateSigningRequestApprovalNamespaceLister implements the CertificateSigningRequestApprovalNamespaceLister +// interface. +type certificateSigningRequestApprovalNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CertificateSigningRequestApprovals in the indexer for a given namespace. +func (s certificateSigningRequestApprovalNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.CertificateSigningRequestApproval, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.CertificateSigningRequestApproval)) + }) + return ret, err +} + +// Get retrieves the CertificateSigningRequestApproval from the indexer for a given namespace and name. 
+func (s certificateSigningRequestApprovalNamespaceLister) Get(name string) (*v1alpha1.CertificateSigningRequestApproval, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("certificatesigningrequestapproval"), name) + } + return obj.(*v1alpha1.CertificateSigningRequestApproval), nil +} diff --git a/client/listers/certificates/v1alpha1/expansion_generated.go b/client/listers/certificates/v1alpha1/expansion_generated.go new file mode 100644 index 0000000000..cc6e55867e --- /dev/null +++ b/client/listers/certificates/v1alpha1/expansion_generated.go @@ -0,0 +1,34 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// CertificateRevocationRequestListerExpansion allows custom methods to be added to +// CertificateRevocationRequestLister. +type CertificateRevocationRequestListerExpansion interface{} + +// CertificateRevocationRequestNamespaceListerExpansion allows custom methods to be added to +// CertificateRevocationRequestNamespaceLister. +type CertificateRevocationRequestNamespaceListerExpansion interface{} + +// CertificateSigningRequestApprovalListerExpansion allows custom methods to be added to +// CertificateSigningRequestApprovalLister. 
+type CertificateSigningRequestApprovalListerExpansion interface{} + +// CertificateSigningRequestApprovalNamespaceListerExpansion allows custom methods to be added to +// CertificateSigningRequestApprovalNamespaceLister. +type CertificateSigningRequestApprovalNamespaceListerExpansion interface{} diff --git a/client/listers/hypershift/v1beta1/certificatesigningrequestapproval.go b/client/listers/hypershift/v1beta1/certificatesigningrequestapproval.go new file mode 100644 index 0000000000..2a1ca77486 --- /dev/null +++ b/client/listers/hypershift/v1beta1/certificatesigningrequestapproval.go @@ -0,0 +1,98 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateSigningRequestApprovalLister helps list CertificateSigningRequestApprovals. +// All objects returned here must be treated as read-only. +type CertificateSigningRequestApprovalLister interface { + // List lists all CertificateSigningRequestApprovals in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequestApproval, err error) + // CertificateSigningRequestApprovals returns an object that can list and get CertificateSigningRequestApprovals. 
+ CertificateSigningRequestApprovals(namespace string) CertificateSigningRequestApprovalNamespaceLister + CertificateSigningRequestApprovalListerExpansion +} + +// certificateSigningRequestApprovalLister implements the CertificateSigningRequestApprovalLister interface. +type certificateSigningRequestApprovalLister struct { + indexer cache.Indexer +} + +// NewCertificateSigningRequestApprovalLister returns a new CertificateSigningRequestApprovalLister. +func NewCertificateSigningRequestApprovalLister(indexer cache.Indexer) CertificateSigningRequestApprovalLister { + return &certificateSigningRequestApprovalLister{indexer: indexer} +} + +// List lists all CertificateSigningRequestApprovals in the indexer. +func (s *certificateSigningRequestApprovalLister) List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequestApproval, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CertificateSigningRequestApproval)) + }) + return ret, err +} + +// CertificateSigningRequestApprovals returns an object that can list and get CertificateSigningRequestApprovals. +func (s *certificateSigningRequestApprovalLister) CertificateSigningRequestApprovals(namespace string) CertificateSigningRequestApprovalNamespaceLister { + return certificateSigningRequestApprovalNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CertificateSigningRequestApprovalNamespaceLister helps list and get CertificateSigningRequestApprovals. +// All objects returned here must be treated as read-only. +type CertificateSigningRequestApprovalNamespaceLister interface { + // List lists all CertificateSigningRequestApprovals in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequestApproval, err error) + // Get retrieves the CertificateSigningRequestApproval from the indexer for a given namespace and name. 
+ // Objects returned here must be treated as read-only. + Get(name string) (*v1beta1.CertificateSigningRequestApproval, error) + CertificateSigningRequestApprovalNamespaceListerExpansion +} + +// certificateSigningRequestApprovalNamespaceLister implements the CertificateSigningRequestApprovalNamespaceLister +// interface. +type certificateSigningRequestApprovalNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CertificateSigningRequestApprovals in the indexer for a given namespace. +func (s certificateSigningRequestApprovalNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequestApproval, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CertificateSigningRequestApproval)) + }) + return ret, err +} + +// Get retrieves the CertificateSigningRequestApproval from the indexer for a given namespace and name. +func (s certificateSigningRequestApprovalNamespaceLister) Get(name string) (*v1beta1.CertificateSigningRequestApproval, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("certificatesigningrequestapproval"), name) + } + return obj.(*v1beta1.CertificateSigningRequestApproval), nil +} diff --git a/client/listers/hypershift/v1beta1/expansion_generated.go b/client/listers/hypershift/v1beta1/expansion_generated.go index 41f055a9c4..06d27550ca 100644 --- a/client/listers/hypershift/v1beta1/expansion_generated.go +++ b/client/listers/hypershift/v1beta1/expansion_generated.go @@ -17,6 +17,14 @@ limitations under the License. package v1beta1 +// CertificateSigningRequestApprovalListerExpansion allows custom methods to be added to +// CertificateSigningRequestApprovalLister. 
+type CertificateSigningRequestApprovalListerExpansion interface{} + +// CertificateSigningRequestApprovalNamespaceListerExpansion allows custom methods to be added to +// CertificateSigningRequestApprovalNamespaceLister. +type CertificateSigningRequestApprovalNamespaceListerExpansion interface{} + // HostedClusterListerExpansion allows custom methods to be added to // HostedClusterLister. type HostedClusterListerExpansion interface{} diff --git a/cmd/cluster/agent/create.go b/cmd/cluster/agent/create.go index 4737824cef..ad60840aba 100644 --- a/cmd/cluster/agent/create.go +++ b/cmd/cluster/agent/create.go @@ -26,7 +26,7 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command { AgentNamespace: "", } - cmd.Flags().StringVar(&opts.AgentPlatform.APIServerAddress, "api-server-address", opts.AgentPlatform.APIServerAddress, "The API server address is the IP address for Kubernetes API communication") + cmd.Flags().StringVar(&opts.AgentPlatform.APIServerAddress, "api-server-address", opts.AgentPlatform.APIServerAddress, "The IP address to be used for the hosted cluster's Kubernetes API communication. 
Requires management cluster connectivity if left unset.") cmd.Flags().StringVar(&opts.AgentPlatform.AgentNamespace, "agent-namespace", opts.AgentPlatform.AgentNamespace, "The namespace in which to search for Agents") _ = cmd.MarkFlagRequired("agent-namespace") _ = cmd.MarkPersistentFlagRequired("pull-secret") diff --git a/cmd/cluster/cluster.go b/cmd/cluster/cluster.go index ab0d0d1ed9..2e2dad62da 100644 --- a/cmd/cluster/cluster.go +++ b/cmd/cluster/cluster.go @@ -15,6 +15,7 @@ import ( "github.com/openshift/hypershift/cmd/cluster/none" "github.com/openshift/hypershift/cmd/cluster/powervs" "github.com/openshift/hypershift/cmd/log" + "github.com/openshift/hypershift/support/globalconfig" ) func NewCreateCommands() *cobra.Command { @@ -28,8 +29,9 @@ func NewCreateCommands() *cobra.Command { Render: false, InfrastructureJSON: "", InfraID: "", - ServiceCIDR: []string{"172.31.0.0/16"}, - ClusterCIDR: []string{"10.132.0.0/14"}, + ServiceCIDR: []string{globalconfig.DefaultIPv4ServiceCIDR}, + ClusterCIDR: []string{globalconfig.DefaultIPv4ClusterCIDR}, + DefaultDual: false, Wait: false, Timeout: 0, ExternalDNSDomain: "", @@ -41,6 +43,8 @@ func NewCreateCommands() *cobra.Command { NodeUpgradeType: "", Arch: "amd64", OLMCatalogPlacement: v1beta1.ManagementOLMCatalogPlacement, + OLMDisableDefaultSources: false, + NetworkType: string(v1beta1.OVNKubernetes), } cmd := &cobra.Command{ Use: "cluster", @@ -75,14 +79,19 @@ func NewCreateCommands() *cobra.Command { cmd.PersistentFlags().StringVar(&opts.InfraID, "infra-id", opts.InfraID, "Infrastructure ID to use for hosted cluster resources.") cmd.PersistentFlags().StringArrayVar(&opts.ServiceCIDR, "service-cidr", opts.ServiceCIDR, "The CIDR of the service network. Can be specified multiple times.") cmd.PersistentFlags().StringArrayVar(&opts.ClusterCIDR, "cluster-cidr", opts.ClusterCIDR, "The CIDR of the cluster network. 
Can be specified multiple times.") + cmd.PersistentFlags().BoolVar(&opts.DefaultDual, "default-dual", opts.DefaultDual, "Defines the Service and Cluster CIDRs as dual-stack default values. Cannot be defined with service-cidr or cluster-cidr flag.") cmd.PersistentFlags().StringToStringVar(&opts.NodeSelector, "node-selector", opts.NodeSelector, "A comma separated list of key=value to use as node selector for the Hosted Control Plane pods to stick to. E.g. role=cp,disk=fast") cmd.PersistentFlags().BoolVar(&opts.Wait, "wait", opts.Wait, "If the create command should block until the cluster is up. Requires at least one node.") cmd.PersistentFlags().DurationVar(&opts.Timeout, "timeout", opts.Timeout, "If the --wait flag is set, set the optional timeout to limit the waiting duration. The format is duration; e.g. 30s or 1h30m45s; 0 means no timeout; default = 0") cmd.PersistentFlags().Var(&opts.NodeUpgradeType, "node-upgrade-type", "The NodePool upgrade strategy for how nodes should behave when upgraded. Supported options: Replace, InPlace") - cmd.PersistentFlags().Var(&opts.OLMCatalogPlacement, "olmCatalogPlacement", "The OLM Catalog Placement for the HostedCluster. Supported options: Management, Guest") + cmd.PersistentFlags().Var(&opts.OLMCatalogPlacement, "olm-catalog-placement", "The OLM Catalog Placement for the HostedCluster. Supported options: Management, Guest") + cmd.PersistentFlags().BoolVar(&opts.OLMDisableDefaultSources, "olm-disable-default-sources", opts.OLMDisableDefaultSources, "Disables the OLM default catalog sources for the HostedCluster.") cmd.PersistentFlags().StringVar(&opts.Arch, "arch", opts.Arch, "The default processor architecture for the NodePool (e.g. arm64, amd64)") cmd.PersistentFlags().StringVar(&opts.PausedUntil, "pausedUntil", opts.PausedUntil, "If a date is provided in RFC3339 format, HostedCluster creation is paused until that date. 
If the boolean true is provided, HostedCluster creation is paused until the field is removed.") + cmd.MarkFlagsMutuallyExclusive("service-cidr", "default-dual") + cmd.MarkFlagsMutuallyExclusive("cluster-cidr", "default-dual") + cmd.AddCommand(aws.NewCreateCommand(opts)) cmd.AddCommand(none.NewCreateCommand(opts)) cmd.AddCommand(agent.NewCreateCommand(opts)) diff --git a/cmd/cluster/core/create.go b/cmd/cluster/core/create.go index fe6d2ec391..7c1f52f9aa 100644 --- a/cmd/cluster/core/create.go +++ b/cmd/cluster/core/create.go @@ -12,6 +12,7 @@ import ( "strings" "time" + configv1 "github.com/openshift/api/config/v1" "golang.org/x/crypto/ssh" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -22,14 +23,13 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" - "github.com/blang/semver" "github.com/go-logr/logr" apifixtures "github.com/openshift/hypershift/api/fixtures" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/cmd/util" "github.com/openshift/hypershift/cmd/version" hyperapi "github.com/openshift/hypershift/support/api" - "github.com/openshift/hypershift/support/releaseinfo" + "github.com/openshift/hypershift/support/globalconfig" ) // ApplyPlatformSpecifics can be used to create platform specific values as well as enriching the fixture with additional values @@ -62,6 +62,7 @@ type CreateOptions struct { SSHKeyFile string ServiceCIDR []string ClusterCIDR []string + DefaultDual bool ExternalDNSDomain string Arch string NodeSelector map[string]string @@ -79,6 +80,7 @@ type CreateOptions struct { NodeUpgradeType hyperv1.UpgradeType PausedUntil string OLMCatalogPlacement hyperv1.OLMCatalogPlacement + OLMDisableDefaultSources bool // BeforeApply is called immediately before resources are applied to the // server, giving the user an opportunity to inspect or mutate the resources. 
@@ -133,6 +135,7 @@ type KubevirtPlatformCreateOptions struct { QoSClass string AdditionalNetworks []string AttachDefaultNetwork *bool + VmNodeSelector map[string]string } type AWSPlatformOptions struct { @@ -175,10 +178,6 @@ func createCommonFixture(ctx context.Context, opts *CreateOptions) (*apifixtures opts.ReleaseImage = defaultVersion.PullSpec } - if err := defaultNetworkType(ctx, opts, &releaseinfo.RegistryClientProvider{}, os.ReadFile); err != nil { - return nil, fmt.Errorf("failed to default network: %w", err) - } - annotations := map[string]string{} for _, s := range opts.Annotations { pair := strings.SplitN(s, "=", 2) @@ -241,6 +240,16 @@ func createCommonFixture(ctx context.Context, opts *CreateOptions) (*apifixtures } } + if opts.DefaultDual { + // Using this AgentNamespace field because I cannot infer the Provider we are using at this point + // TODO (jparrill): Refactor this to use generic validations as same as we use the ApplyPlatformSpecificsValues in a follow up PR + if len(opts.AgentPlatform.AgentNamespace) <= 0 { + return nil, fmt.Errorf("--default-dual is only supported on Agent platform") + } + opts.ClusterCIDR = []string{globalconfig.DefaultIPv4ClusterCIDR, globalconfig.DefaultIPv6ClusterCIDR} + opts.ServiceCIDR = []string{globalconfig.DefaultIPv4ServiceCIDR, globalconfig.DefaultIPv6ServiceCIDR} + } + var userCABundle []byte if len(opts.AdditionalTrustBundle) > 0 { userCABundle, err = os.ReadFile(opts.AdditionalTrustBundle) @@ -272,6 +281,13 @@ func createCommonFixture(ctx context.Context, opts *CreateOptions) (*apifixtures } } + var operatorHub *configv1.OperatorHubSpec + if opts.OLMDisableDefaultSources { + operatorHub = &configv1.OperatorHubSpec{ + DisableAllDefaultSources: true, + } + } + return &apifixtures.ExampleOptions{ AdditionalTrustBundle: string(userCABundle), ImageContentSources: imageContentSources, @@ -298,6 +314,7 @@ func createCommonFixture(ctx context.Context, opts *CreateOptions) (*apifixtures UpgradeType: 
opts.NodeUpgradeType, PausedUntil: opts.PausedUntil, OLMCatalogPlacement: opts.OLMCatalogPlacement, + OperatorHub: operatorHub, }, nil } @@ -472,53 +489,3 @@ func CreateCluster(ctx context.Context, opts *CreateOptions, platformSpecificApp // Otherwise, apply the objects return apply(ctx, opts.Log, exampleOptions, opts.Wait, opts.BeforeApply) } - -func defaultNetworkType(ctx context.Context, opts *CreateOptions, releaseProvider releaseinfo.Provider, readFile func(string) ([]byte, error)) error { - if opts.NetworkType != "" { - return nil - } else if opts.ReleaseImage == "" { - opts.NetworkType = string(hyperv1.OVNKubernetes) - return nil - } - - version, err := getReleaseSemanticVersion(ctx, opts, releaseProvider, readFile) - if err != nil { - return fmt.Errorf("failed to get version for release image %s: %w", opts.ReleaseImage, err) - } - if version.Minor > 10 { - opts.NetworkType = string(hyperv1.OVNKubernetes) - } else { - opts.NetworkType = string(hyperv1.OpenShiftSDN) - } - - return nil -} - -func getReleaseSemanticVersion(ctx context.Context, opts *CreateOptions, provider releaseinfo.Provider, readFile func(string) ([]byte, error)) (*semver.Version, error) { - var pullSecretBytes []byte - var err error - if len(opts.CredentialSecretName) > 0 { - pullSecretBytes, err = util.GetPullSecret(opts.CredentialSecretName, opts.Namespace) - if err != nil { - return nil, err - } - } - // overrides secret if set - if len(opts.PullSecretFile) > 0 { - pullSecretBytes, err = readFile(opts.PullSecretFile) - if err != nil { - return nil, fmt.Errorf("cannot read pull secret file %s: %w", opts.PullSecretFile, err) - } - - } - - releaseImage, err := provider.Lookup(ctx, opts.ReleaseImage, pullSecretBytes) - if err != nil { - return nil, fmt.Errorf("failed to get version information from %s: %w", opts.ReleaseImage, err) - } - semanticVersion, err := semver.Parse(releaseImage.Version()) - if err != nil { - return nil, err - } - return &semanticVersion, nil -} diff --git 
a/cmd/cluster/core/create_test.go b/cmd/cluster/core/create_test.go deleted file mode 100644 index b824c76bd9..0000000000 --- a/cmd/cluster/core/create_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package core - -import ( - "context" - "testing" - - "github.com/openshift/hypershift/support/releaseinfo" - "github.com/openshift/hypershift/support/releaseinfo/fake" -) - -func TestDefaultNetworkType(t *testing.T) { - testCases := []struct { - name string - opts *CreateOptions - provider releaseinfo.Provider - expected string - }{ - { - name: "Already configured, no change", - opts: &CreateOptions{ - NetworkType: "foo", - ReleaseImage: "4.11.0", - }, - expected: "foo", - }, - { - name: "4.10, SDN", - opts: &CreateOptions{ - ReleaseImage: "4.10.0", - }, - provider: &fake.FakeReleaseProvider{Version: "4.10.0"}, - expected: "OpenShiftSDN", - }, - { - name: "4.11, ovn-k", - opts: &CreateOptions{ - ReleaseImage: "4.11.0", - }, - provider: &fake.FakeReleaseProvider{Version: "4.11.0"}, - expected: "OVNKubernetes", - }, - { - name: "4.12, ovn-k", - opts: &CreateOptions{ - ReleaseImage: "4.12.0", - }, - provider: &fake.FakeReleaseProvider{Version: "4.12.0"}, - expected: "OVNKubernetes", - }, - } - - readFile := func(string) ([]byte, error) { return nil, nil } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - if err := defaultNetworkType(context.Background(), tc.opts, tc.provider, readFile); err != nil { - t.Fatalf("defaultNetworkType failed: %v", err) - } - if tc.opts.NetworkType != tc.expected { - t.Errorf("expected network type %s, got %s", tc.expected, tc.opts.NetworkType) - } - }) - } -} diff --git a/cmd/cluster/core/destroy.go b/cmd/cluster/core/destroy.go index 5a5ca0b488..70018a1648 100644 --- a/cmd/cluster/core/destroy.go +++ b/cmd/cluster/core/destroy.go @@ -10,6 +10,7 @@ import ( hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/cmd/util" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" 
apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/types" @@ -99,10 +100,26 @@ func DestroyCluster(ctx context.Context, hostedCluster *hyperv1.HostedCluster, o // If the hosted cluster exists, add a finalizer, delete it, and wait for // the cluster to be cleaned up before destroying its infrastructure. if hostedClusterExists { + + original := hostedCluster.DeepCopy() if shouldDestroyPlatformSpecifics { - err = setFinalizer(ctx, hostedCluster, o, c) - if err != nil { - return err + setFinalizer(hostedCluster, o) + } + if o.DestroyCloudResources { + setDestroyCloudResourcesAnnotation(hostedCluster, o) + } + + // if the hostedcluster is needs to be modified during deletion, patch the + // hosted cluster before deleting it. + if !equality.Semantic.DeepEqual(&hostedCluster, original) { + if err := c.Patch(ctx, hostedCluster, client.MergeFrom(original)); err != nil { + if apierrors.IsNotFound(err) { + o.Log.Info("Hosted cluster not found, skipping client updates", "namespace", o.Namespace, "name", o.Name) + } else if !strings.Contains(err.Error(), "no new finalizers can be added if the object is being deleted") { + return fmt.Errorf("failed to add client finalizer to hosted cluster: %w", err) + } + } else { + o.Log.Info("Updated hosted cluster", "namespace", o.Namespace, "name", o.Name) } } @@ -212,32 +229,22 @@ func waitForRestOfFinalizers(ctx context.Context, hostedCluster *hyperv1.HostedC return nil } -func setFinalizer(ctx context.Context, hostedCluster *hyperv1.HostedCluster, o *DestroyOptions, c client.Client) error { - if sets.New[string](hostedCluster.Finalizers...).Has(destroyFinalizer) { - return nil +func setDestroyCloudResourcesAnnotation(hostedCluster *hyperv1.HostedCluster, o *DestroyOptions) { + if hostedCluster.Annotations == nil { + hostedCluster.Annotations = map[string]string{} } + hostedCluster.Annotations[hyperv1.CleanupCloudResourcesAnnotation] = "true" + o.Log.Info("Marking cleanup of cloud 
resources for hosted cluster", "namespace", hostedCluster.Namespace, "name", hostedCluster.Name) +} - original := hostedCluster.DeepCopy() +func setFinalizer(hostedCluster *hyperv1.HostedCluster, o *DestroyOptions) { + if sets.New[string](hostedCluster.Finalizers...).Has(destroyFinalizer) { + return + } if hostedCluster.DeletionTimestamp == nil { controllerutil.AddFinalizer(hostedCluster, destroyFinalizer) } - if o.DestroyCloudResources { - if hostedCluster.Annotations == nil { - hostedCluster.Annotations = map[string]string{} - } - hostedCluster.Annotations[hyperv1.CleanupCloudResourcesAnnotation] = "true" - } - if err := c.Patch(ctx, hostedCluster, client.MergeFrom(original)); err != nil { - if apierrors.IsNotFound(err) { - o.Log.Info("Hosted cluster not found, skipping finalizer update", "namespace", o.Namespace, "name", o.Name) - } else if !strings.Contains(err.Error(), "no new finalizers can be added if the object is being deleted") { - return fmt.Errorf("failed to add finalizer to hosted cluster: %w", err) - } - } else { - o.Log.Info("Updated finalizer for hosted cluster", "namespace", o.Namespace, "name", o.Name) - } - - return nil + o.Log.Info("Setting client finalizer for hosted cluster", "namespace", hostedCluster.Namespace, "name", hostedCluster.Name) } func waitForClusterDeletion(ctx context.Context, hostedCluster *hyperv1.HostedCluster, o *DestroyOptions, c client.Client) error { diff --git a/cmd/cluster/kubevirt/create.go b/cmd/cluster/kubevirt/create.go index f5ceb12874..147ff76500 100644 --- a/cmd/cluster/kubevirt/create.go +++ b/cmd/cluster/kubevirt/create.go @@ -65,6 +65,7 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command { cmd.Flags().StringVar(&opts.KubevirtPlatform.QoSClass, "qos-class", opts.KubevirtPlatform.QoSClass, `If "Guaranteed", set the limit cpu and memory of the VirtualMachineInstance, to be the same as the requested cpu and memory; supported values: "Burstable" and "Guaranteed"`) 
cmd.Flags().StringArrayVar(&opts.KubevirtPlatform.AdditionalNetworks, "additional-network", opts.KubevirtPlatform.AdditionalNetworks, fmt.Sprintf(`Specify additional network that should be attached to the nodes, the "name" field should point to a multus network attachment definition with the format "[namespace]/[name]", it can be specified multiple times to attach to multiple networks. Supported parameters: %s, example: "name:ns1/nad-foo`, params.Supported(NetworkOpts{}))) cmd.Flags().BoolVar(opts.KubevirtPlatform.AttachDefaultNetwork, "attach-default-network", *opts.KubevirtPlatform.AttachDefaultNetwork, `Specify if the default pod network should be attached to the nodes. This can only be set if --additional-network is configured`) + cmd.Flags().StringToStringVar(&opts.KubevirtPlatform.VmNodeSelector, "vm-node-selector", opts.KubevirtPlatform.VmNodeSelector, "A comma separated list of key=value pairs to use as the node selector for the KubeVirt VirtualMachines to be scheduled onto. (e.g. 
role=kubevirt,size=large)") cmd.MarkPersistentFlagRequired("pull-secret") @@ -208,6 +209,7 @@ func ApplyPlatformSpecificsValues(ctx context.Context, exampleOptions *apifixtur QoSClass: qosClass, AdditionalNetworks: additionalNetworks, AttachDefaultNetwork: opts.KubevirtPlatform.AttachDefaultNetwork, + VmNodeSelector: opts.KubevirtPlatform.VmNodeSelector, } if opts.BaseDomain != "" { diff --git a/cmd/infra/aws/ec2_sg.go b/cmd/infra/aws/ec2_sg.go index f783d944ec..6dacb75d80 100644 --- a/cmd/infra/aws/ec2_sg.go +++ b/cmd/infra/aws/ec2_sg.go @@ -62,7 +62,7 @@ func (o *CreateInfraOptions) CreateWorkerSecurityGroup(client ec2iface.EC2API, v securityGroupID := aws.StringValue(securityGroup.GroupId) sgUserID := aws.StringValue(securityGroup.OwnerId) egressPermissions := awsutil.DefaultWorkerSGEgressRules() - ingressPermissions := awsutil.DefaultWorkerSGIngressRules(DefaultCIDRBlock, securityGroupID, sgUserID) + ingressPermissions := awsutil.DefaultWorkerSGIngressRules([]string{DefaultCIDRBlock}, securityGroupID, sgUserID) var egressToAuthorize []*ec2.IpPermission var ingressToAuthorize []*ec2.IpPermission diff --git a/cmd/infra/aws/iam.go b/cmd/infra/aws/iam.go index e1cd2534c1..9495d9a02b 100644 --- a/cmd/infra/aws/iam.go +++ b/cmd/infra/aws/iam.go @@ -10,7 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/iam/iamiface" - jose "gopkg.in/square/go-jose.v2" + jose "gopkg.in/go-jose/go-jose.v2" "github.com/openshift/hypershift/cmd/log" ) @@ -202,6 +202,7 @@ const ( "ec2:DescribeSecurityGroups", "ec2:DescribeSubnets", "ec2:DescribeVpcs", + "ec2:DescribeDhcpOptions", "ec2:DescribeVpcAttribute", "ec2:DescribeVolumes", "ec2:DetachInternetGateway", @@ -527,6 +528,7 @@ func (o *CreateIAMOptions) CreateOIDCProvider(iamClient iamiface.IAMAPI) (string oidcOutput, err := iamClient.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{ ClientIDList: []*string{ aws.String("openshift"), + 
aws.String("sts.amazonaws.com"), }, // The AWS console mentions that this will be ignored for S3 buckets but creation fails if we don't // pass a thumbprint. diff --git a/cmd/infra/azure/create.go b/cmd/infra/azure/create.go index 179acea63b..4840398207 100644 --- a/cmd/infra/azure/create.go +++ b/cmd/infra/azure/create.go @@ -63,10 +63,12 @@ func NewCreateCommand() *cobra.Command { cmd.Flags().StringVar(&opts.BaseDomain, "base-domain", opts.BaseDomain, "The ingress base domain for the cluster") cmd.Flags().StringVar(&opts.Name, "name", opts.Name, "A name for the cluster") cmd.Flags().StringVar(&opts.OutputFile, "output-file", opts.OutputFile, "Path to file that will contain output information from infra resources (optional)") + cmd.Flags().StringVar(&opts.RHCOSImage, "rhcos-image", opts.RHCOSImage, `RHCOS image to be used for the NodePool. Could be obtained using podman run --rm -it --entrypoint cat $RELEASE_IMAGE release-manifests/0000_50_installer_coreos-bootimages.yaml | yq .data.stream -r | yq '.architectures.x86_64["rhel-coreos-extensions"]["azure-disk"].url'`) - cmd.MarkFlagRequired("infra-id") - cmd.MarkFlagRequired("azure-creds") - cmd.MarkFlagRequired("name") + _ = cmd.MarkFlagRequired("infra-id") + _ = cmd.MarkFlagRequired("azure-creds") + _ = cmd.MarkFlagRequired("name") + _ = cmd.MarkFlagRequired("rhcos-image") l := log.Log cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/infra/powervs/create.go b/cmd/infra/powervs/create.go index f9bd87e7b9..8b399ab699 100644 --- a/cmd/infra/powervs/create.go +++ b/cmd/infra/powervs/create.go @@ -344,10 +344,6 @@ func (infra *Infra) SetupInfra(ctx context.Context, options *CreateInfraOptions) return fmt.Errorf("error setup base domain: %w", err) } - if err = infra.setupSecrets(options); err != nil { - return fmt.Errorf("error setup secrets: %w", err) - } - gtag, err := globaltaggingv1.NewGlobalTaggingV1(&globaltaggingv1.GlobalTaggingV1Options{Authenticator: getIAMAuth()}) if err != nil { 
return err @@ -388,6 +384,11 @@ func (infra *Infra) SetupInfra(ctx context.Context, options *CreateInfraOptions) return fmt.Errorf("cloud connection is not up: %w", err) } + // setupSecrets need parameter cloudInstanceId, hence invoked after setupPowerVSCloudInstance + if err := infra.setupSecrets(options); err != nil { + return fmt.Errorf("error setup secrets: %w", err) + } + log(options.InfraID).Info("Setup infra completed in", "duration", time.Since(startTime).String()) return nil } @@ -395,21 +396,38 @@ func (infra *Infra) SetupInfra(ctx context.Context, options *CreateInfraOptions) // setupSecrets generate secrets for control plane components func (infra *Infra) setupSecrets(options *CreateInfraOptions) error { var err error + var powerVsCloudInstanceID string + + if options.CloudInstanceID != "" { + powerVsCloudInstanceID = options.CloudInstanceID + } else if infra.CloudInstanceID != "" { + powerVsCloudInstanceID = infra.CloudInstanceID + } else { + return fmt.Errorf("unable to limit access scope to instance level: cloud instance not found") + } if options.RecreateSecrets { - deleteSecrets(options.Name, options.Namespace, infra.AccountID, infra.ResourceGroupID) + deleteSecrets(options.Name, options.Namespace, powerVsCloudInstanceID, infra.AccountID, infra.ResourceGroupID) } log(infra.ID).Info("Creating Secrets ...") infra.Secrets = Secrets{} + kubeCloudControllerManagerCR, err = updateCRYaml(kubeCloudControllerManagerCR, "kubeCloudControllerManagerCRTemplate", powerVsCloudInstanceID) + if err != nil { + return fmt.Errorf("error updating kube cloud controller manager yaml: %w", err) + } infra.Secrets.KubeCloudControllerManager, err = setupServiceID(options.Name, cloudApiKey, infra.AccountID, infra.ResourceGroupID, kubeCloudControllerManagerCR, kubeCloudControllerManagerCreds, options.Namespace) if err != nil { return fmt.Errorf("error setup kube cloud controller manager secret: %w", err) } + nodePoolManagementCR, err = updateCRYaml(nodePoolManagementCR, 
"nodePoolManagementCRTemplate", powerVsCloudInstanceID) + if err != nil { + return fmt.Errorf("error updating nodepool management yaml: %w", err) + } infra.Secrets.NodePoolManagement, err = setupServiceID(options.Name, cloudApiKey, infra.AccountID, infra.ResourceGroupID, nodePoolManagementCR, nodePoolManagementCreds, options.Namespace) if err != nil { @@ -422,6 +440,10 @@ func (infra *Infra) setupSecrets(options *CreateInfraOptions) error { return fmt.Errorf("error setup ingress operator secret: %w", err) } + storageOperatorCR, err = updateCRYaml(storageOperatorCR, "storageOperatorCRTemplate", powerVsCloudInstanceID) + if err != nil { + return fmt.Errorf("error updating storage operator yaml: %w", err) + } infra.Secrets.StorageOperator, err = setupServiceID(options.Name, cloudApiKey, infra.AccountID, infra.ResourceGroupID, storageOperatorCR, storageOperatorCreds, options.Namespace) if err != nil { diff --git a/cmd/infra/powervs/destroy.go b/cmd/infra/powervs/destroy.go index 8b3734bfa0..ccf265efb3 100644 --- a/cmd/infra/powervs/destroy.go +++ b/cmd/infra/powervs/destroy.go @@ -180,11 +180,6 @@ func (options *DestroyInfraOptions) DestroyInfra(ctx context.Context, infra *Inf log(options.InfraID).Error(err, "error deleting dns record from cis domain") } - if err = deleteSecrets(options.Name, options.Namespace, accountID, resourceGroupID); err != nil { - errL = append(errL, fmt.Errorf("error deleting secrets: %w", err)) - log(options.InfraID).Error(err, "error deleting secrets") - } - if err = deleteCOS(ctx, options, resourceGroupID); err != nil { errL = append(errL, fmt.Errorf("error deleting cos buckets: %w", err)) log(options.InfraID).Error(err, "error deleting cos buckets") @@ -218,6 +213,11 @@ func (options *DestroyInfraOptions) DestroyInfra(ctx context.Context, infra *Inf } } + if err = deleteSecrets(options.Name, options.Namespace, powerVsCloudInstanceID, accountID, resourceGroupID); err != nil { + errL = append(errL, fmt.Errorf("error deleting secrets: %w", 
err)) + log(options.InfraID).Error(err, "error deleting secrets") + } + var session *ibmpisession.IBMPISession if !skipPowerVs { session, err = createPowerVSSession(accountID, options.Region, options.Zone, options.Debug) @@ -302,14 +302,23 @@ func deleteDNSRecords(ctx context.Context, options *DestroyInfraOptions) error { } // deleteSecrets delete secrets generated for control plane components -func deleteSecrets(name, namespace, accountID string, resourceGroupID string) error { +func deleteSecrets(name, namespace, cloudInstanceID string, accountID string, resourceGroupID string) error { + var e error + kubeCloudControllerManagerCR, e = updateCRYaml(kubeCloudControllerManagerCR, "kubeCloudControllerManagerCRTemplate", cloudInstanceID) + if e != nil { + return fmt.Errorf("error updating kube cloud controller manager yaml: %w", e) + } err := deleteServiceID(name, cloudApiKey, accountID, resourceGroupID, kubeCloudControllerManagerCR, kubeCloudControllerManagerCreds, namespace) if err != nil { return fmt.Errorf("error deleting kube cloud controller manager secret: %w", err) } + nodePoolManagementCR, e = updateCRYaml(nodePoolManagementCR, "nodePoolManagementCRTemplate", cloudInstanceID) + if e != nil { + return fmt.Errorf("error updating nodepool management yaml: %w", e) + } err = deleteServiceID(name, cloudApiKey, accountID, resourceGroupID, nodePoolManagementCR, nodePoolManagementCreds, namespace) if err != nil { @@ -322,6 +331,10 @@ func deleteSecrets(name, namespace, accountID string, resourceGroupID string) er return fmt.Errorf("error deleting ingress operator secret: %w", err) } + storageOperatorCR, e = updateCRYaml(storageOperatorCR, "storageOperatorCRTemplate", cloudInstanceID) + if e != nil { + return fmt.Errorf("error updating storage operator yaml: %w", e) + } err = deleteServiceID(name, cloudApiKey, accountID, resourceGroupID, storageOperatorCR, storageOperatorCreds, namespace) if err != nil { diff --git a/cmd/infra/powervs/service_id.go 
b/cmd/infra/powervs/service_id.go index 76108dbaa0..1a350d188c 100644 --- a/cmd/infra/powervs/service_id.go +++ b/cmd/infra/powervs/service_id.go @@ -1,10 +1,13 @@ package powervs import ( + "bytes" "fmt" + "strings" + "text/template" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/yaml" - "strings" "github.com/IBM/platform-services-go-sdk/iamidentityv1" @@ -13,6 +16,10 @@ import ( ccoibmcloud "github.com/openshift/cloud-credential-operator/pkg/ibmcloud" ) +type PolicyParams struct { + CloudInstanceID string +} + var kubeCloudControllerManagerCR = ` apiVersion: cloudcredential.openshift.io/v1 kind: CredentialsRequest @@ -44,6 +51,9 @@ spec: - attributes: - name: serviceName value: power-iaas + - name: serviceInstance + value: {{.CloudInstanceID}} + operator: stringEquals roles: - crn:v1:bluemix:public:iam::::role:Viewer - crn:v1:bluemix:public:iam::::serviceRole:Reader @@ -63,6 +73,9 @@ spec: - attributes: - name: serviceName value: power-iaas + - name: serviceInstance + value: {{.CloudInstanceID}} + operator: stringEquals roles: - crn:v1:bluemix:public:iam::::serviceRole:Manager - crn:v1:bluemix:public:iam::::role:Editor @@ -101,6 +114,9 @@ spec: - attributes: - name: serviceName value: power-iaas + - name: serviceInstance + value: {{.CloudInstanceID}} + operator: stringEquals roles: - crn:v1:bluemix:public:iam::::serviceRole:Manager - crn:v1:bluemix:public:iam::::role:Editor @@ -188,6 +204,23 @@ func deleteServiceID(name, APIKey, accountID, resourceGroupID, crYaml, secretRef return nil } +func updateCRYaml(crYaml, templateName string, serviceInstanceValue string) (string, error) { + params := PolicyParams{ + CloudInstanceID: serviceInstanceValue, + } + + tmpl, err := template.New(templateName).Parse(crYaml) + if err != nil { + return "", fmt.Errorf("failed to parse the template %s, err: %w", templateName, err) + } + + b := &bytes.Buffer{} + if err = tmpl.Execute(b, params); err != nil { + return "", fmt.Errorf("failed to execute %s: err: %w", 
templateName, err) + } + return b.String(), nil +} + func extractServiceIDFromCRN(crn string) string { crnL := strings.Split(crn, ":") return crnL[len(crnL)-1] diff --git a/cmd/infra/powervs/service_id_test.go b/cmd/infra/powervs/service_id_test.go index e796de3bcb..07b9408ca6 100644 --- a/cmd/infra/powervs/service_id_test.go +++ b/cmd/infra/powervs/service_id_test.go @@ -52,6 +52,7 @@ func TestCreateServiceIDClient(t *testing.T) { t.Run(name, func(t *testing.T) { g := NewGomegaWithT(t) + test.input.crYaml, _ = updateCRYaml(test.input.crYaml, "crYaml_template", "cloud_ins_id_1234") _, err := createServiceIDClient(test.input.name, test.input.apiKey, test.input.account, test.input.resourceGroupID, test.input.crYaml, test.input.secretRefName, test.input.secretRefNamespace) if test.errExpected { g.Expect(err).ToNot(BeNil()) diff --git a/cmd/install/assets/hypershift-operator/certificates.hypershift.openshift.io_certificaterevocationrequests.yaml b/cmd/install/assets/hypershift-operator/certificates.hypershift.openshift.io_certificaterevocationrequests.yaml new file mode 100644 index 0000000000..f56936f520 --- /dev/null +++ b/cmd/install/assets/hypershift-operator/certificates.hypershift.openshift.io_certificaterevocationrequests.yaml @@ -0,0 +1,157 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + name: certificaterevocationrequests.certificates.hypershift.openshift.io +spec: + group: certificates.hypershift.openshift.io + names: + kind: CertificateRevocationRequest + listKind: CertificateRevocationRequestList + plural: certificaterevocationrequests + shortNames: + - crr + - crrs + singular: certificaterevocationrequest + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: CertificateRevocationRequest defines the desired state of CertificateRevocationRequest. 
+ A request denotes the user's desire to revoke a signer certificate of the + class indicated in spec. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateRevocationRequestSpec defines the desired state + of CertificateRevocationRequest + properties: + signerClass: + description: SignerClass identifies the class of signer to revoke. + All the active signing CAs for the signer class will be revoked. + enum: + - customer-break-glass + - sre-break-glass + type: string + x-kubernetes-validations: + - message: signerClass is immutable + rule: self == oldSelf + required: + - signerClass + type: object + status: + description: CertificateRevocationRequestStatus defines the observed state + of CertificateRevocationRequest + properties: + conditions: + description: Conditions contain details about the various aspects + of certificate revocation. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + previousSigner: + description: PreviousSigner stores a reference to the previous signer + certificate. We require storing this data to ensure that we can + validate that the old signer is no longer valid before considering + revocation complete. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + revocationTimestamp: + description: RevocationTimestamp is the cut-off time for signing CAs + to be revoked. All certificates that are valid before this time + will be revoked; all re-generated certificates will not be valid + at or before this time. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/cmd/install/assets/hypershift-operator/certificates.hypershift.openshift.io_certificatesigningrequestapprovals.yaml b/cmd/install/assets/hypershift-operator/certificates.hypershift.openshift.io_certificatesigningrequestapprovals.yaml new file mode 100644 index 0000000000..c9a61b7a8c --- /dev/null +++ b/cmd/install/assets/hypershift-operator/certificates.hypershift.openshift.io_certificatesigningrequestapprovals.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + name: certificatesigningrequestapprovals.certificates.hypershift.openshift.io +spec: + group: certificates.hypershift.openshift.io + names: + kind: CertificateSigningRequestApproval + listKind: CertificateSigningRequestApprovalList + plural: certificatesigningrequestapprovals + shortNames: + - csra + - csras + singular: certificatesigningrequestapproval + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: CertificateSigningRequestApproval defines the desired state of + CertificateSigningRequestApproval + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateSigningRequestApprovalSpec defines the desired + state of CertificateSigningRequestApproval + type: object + status: + description: CertificateSigningRequestApprovalStatus defines the observed + state of CertificateSigningRequestApproval + type: object + type: object + served: true + storage: true diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_certificatesigningrequestapprovals.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_certificatesigningrequestapprovals.yaml new file mode 100644 index 0000000000..618a3a4193 --- /dev/null +++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_certificatesigningrequestapprovals.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + name: certificatesigningrequestapprovals.hypershift.openshift.io +spec: + group: hypershift.openshift.io + names: + kind: CertificateSigningRequestApproval + listKind: CertificateSigningRequestApprovalList + plural: certificatesigningrequestapprovals + shortNames: + - csra + - csras + singular: certificatesigningrequestapproval + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CertificateSigningRequestApproval defines the desired state of + CertificateSigningRequestApproval + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateSigningRequestApprovalSpec defines the desired + state of CertificateSigningRequestApproval + type: object + status: + description: CertificateSigningRequestApprovalStatus defines the observed + state of CertificateSigningRequestApproval + type: object + type: object + served: true + storage: true diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedclusters.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedclusters.yaml index 17565e3f73..fecaec64ff 100644 --- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedclusters.yaml +++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedclusters.yaml @@ -588,7 +588,8 @@ spec: items: minLength: 1 type: string - maxItems: 1 + maxItems: 10 + minItems: 1 type: array x-kubernetes-list-type: set issuerCertificateAuthority: @@ -2029,6 +2030,47 @@ spec: type: integer type: object type: object + operatorhub: + description: OperatorHub specifies the configuration for the Operator + Lifecycle Manager in the HostedCluster. This is only configured + at deployment time but the controller are not reconcilling over + it. The OperatorHub configuration will be constantly reconciled + if catalog placement is management, but only on cluster creation + otherwise. + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable + all the default hub sources. 
If this is true, a specific + entry in sources can be used to enable a default source. + If this is false, a specific entry in sources can be used + to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and + their configuration. If the list is empty, it implies that + the default hub sources are enabled on the cluster unless + disableAllDefaultSources is true. If disableAllDefaultSources + is true and sources is not empty, the configuration present + in sources will take precedence. The list of default hub + sources and their current state will always be reflected + in the status block. + items: + description: HubSource is used to specify the hub source + and its configuration + properties: + disabled: + description: disabled is used to disable a default hub + source on cluster + type: boolean + name: + description: name is the name of one of the default + hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object proxy: description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. @@ -4704,7 +4746,8 @@ spec: items: minLength: 1 type: string - maxItems: 1 + maxItems: 10 + minItems: 1 type: array x-kubernetes-list-type: set issuerCertificateAuthority: @@ -6126,6 +6169,47 @@ spec: rule: '!has(self.tokenConfig) || !has(self.tokenConfig.accessTokenInactivityTimeout) || duration(self.tokenConfig.accessTokenInactivityTimeout).getSeconds() >= 300' + operatorhub: + description: OperatorHub specifies the configuration for the Operator + Lifecycle Manager in the HostedCluster. This is only configured + at deployment time but the controller are not reconcilling over + it. The OperatorHub configuration will be constantly reconciled + if catalog placement is management, but only on cluster creation + otherwise. 
+ properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable + all the default hub sources. If this is true, a specific + entry in sources can be used to enable a default source. + If this is false, a specific entry in sources can be used + to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and + their configuration. If the list is empty, it implies that + the default hub sources are enabled on the cluster unless + disableAllDefaultSources is true. If disableAllDefaultSources + is true and sources is not empty, the configuration present + in sources will take precedence. The list of default hub + sources and their current state will always be reflected + in the status block. + items: + description: HubSource is used to specify the hub source + and its configuration + properties: + disabled: + description: disabled is used to disable a default hub + source on cluster + type: boolean + name: + description: name is the name of one of the default + hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object proxy: description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. 
diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml index c9d47f18f0..7a61395811 100644 --- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml +++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml @@ -574,7 +574,8 @@ spec: items: minLength: 1 type: string - maxItems: 1 + maxItems: 10 + minItems: 1 type: array x-kubernetes-list-type: set issuerCertificateAuthority: @@ -2015,6 +2016,47 @@ spec: type: integer type: object type: object + operatorhub: + description: OperatorHub specifies the configuration for the Operator + Lifecycle Manager in the HostedCluster. This is only configured + at deployment time but the controller are not reconcilling over + it. The OperatorHub configuration will be constantly reconciled + if catalog placement is management, but only on cluster creation + otherwise. + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable + all the default hub sources. If this is true, a specific + entry in sources can be used to enable a default source. + If this is false, a specific entry in sources can be used + to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and + their configuration. If the list is empty, it implies that + the default hub sources are enabled on the cluster unless + disableAllDefaultSources is true. If disableAllDefaultSources + is true and sources is not empty, the configuration present + in sources will take precedence. The list of default hub + sources and their current state will always be reflected + in the status block. 
+ items: + description: HubSource is used to specify the hub source + and its configuration + properties: + disabled: + description: disabled is used to disable a default hub + source on cluster + type: boolean + name: + description: name is the name of one of the default + hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object proxy: description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. @@ -2157,6 +2199,9 @@ spec: policy applied to critical control plane components. The default value is SingleReplica. type: string + x-kubernetes-validations: + - message: ControllerAvailabilityPolicy is immutable + rule: self == oldSelf dns: description: DNSSpec specifies the DNS configuration in the cluster. properties: @@ -4678,7 +4723,8 @@ spec: items: minLength: 1 type: string - maxItems: 1 + maxItems: 10 + minItems: 1 type: array x-kubernetes-list-type: set issuerCertificateAuthority: @@ -6100,6 +6146,47 @@ spec: rule: '!has(self.tokenConfig) || !has(self.tokenConfig.accessTokenInactivityTimeout) || duration(self.tokenConfig.accessTokenInactivityTimeout).getSeconds() >= 300' + operatorhub: + description: OperatorHub specifies the configuration for the Operator + Lifecycle Manager in the HostedCluster. This is only configured + at deployment time but the controller are not reconcilling over + it. The OperatorHub configuration will be constantly reconciled + if catalog placement is management, but only on cluster creation + otherwise. + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable + all the default hub sources. If this is true, a specific + entry in sources can be used to enable a default source. + If this is false, a specific entry in sources can be used + to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and + their configuration. 
If the list is empty, it implies that + the default hub sources are enabled on the cluster unless + disableAllDefaultSources is true. If disableAllDefaultSources + is true and sources is not empty, the configuration present + in sources will take precedence. The list of default hub + sources and their current state will always be reflected + in the status block. + items: + description: HubSource is used to specify the hub source + and its configuration + properties: + disabled: + description: disabled is used to disable a default hub + source on cluster + type: boolean + name: + description: name is the name of one of the default + hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object proxy: description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. @@ -6226,6 +6313,9 @@ spec: policy applied to critical control plane components. The default value is SingleReplica. type: string + x-kubernetes-validations: + - message: ControllerAvailabilityPolicy is immutable + rule: self == oldSelf dns: description: DNSSpec specifies the DNS configuration in the cluster. properties: diff --git a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml index 1ccfb0e720..c15dcc8b69 100644 --- a/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml +++ b/cmd/install/assets/hypershift-operator/hypershift.openshift.io_nodepools.yaml @@ -591,6 +591,14 @@ spec: - Enable - Disable type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the kubevirt VirtualMachine to fit on a node. Selector + which must match a node''s labels for the VM to be scheduled + on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object rootVolume: default: persistent: @@ -1562,6 +1570,14 @@ spec: - Enable - Disable type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the kubevirt VirtualMachine to fit on a node. Selector + which must match a node''s labels for the VM to be scheduled + on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object rootVolume: default: persistent: diff --git a/cmd/install/assets/hypershift_operator.go b/cmd/install/assets/hypershift_operator.go index 61c6e64de0..f24de6496f 100644 --- a/cmd/install/assets/hypershift_operator.go +++ b/cmd/install/assets/hypershift_operator.go @@ -47,6 +47,14 @@ var ( // allowPrivilegeEscalation is used to set the status of the // privilegeEscalation on SeccompProfile allowPrivilegeEscalation = false + + // readOnlyRootFilesystem is used to set the container security + // context to mount the root filesystem as read-only. + readOnlyRootFilesystem = true + + // privileged is used to set the container security + // context to run container as unprivileged. 
+ privileged = false ) type HyperShiftNamespace struct { @@ -82,7 +90,8 @@ const ( oidcProviderS3CredsSecretName = "hypershift-operator-oidc-provider-s3-credentials" externaDNSCredsSecretName = "external-dns-credentials" - HypershiftOperatorName = "operator" + HypershiftOperatorName = "operator" + HyperShiftInstallCLIVersionAnnotation = "hypershift.openshift.io/install-cli-version" ) type HyperShiftOperatorCredentialsSecret struct { @@ -211,6 +220,7 @@ func (o ExternalDNSDeployment) Build() *appsv1.Deployment { fmt.Sprintf("--txt-owner-id=%s", txtOwnerId), fmt.Sprintf("--label-filter=%s!=%s", hyperv1.RouteVisibilityLabel, hyperv1.RouteVisibilityPrivate), "--interval=1m", + "--txt-cache-interval=1h", }, Ports: []corev1.ContainerPort{{Name: "metrics", ContainerPort: 7979}}, LivenessProbe: &corev1.Probe{ @@ -233,6 +243,10 @@ func (o ExternalDNSDeployment) Build() *appsv1.Deployment { corev1.ResourceCPU: resource.MustParse("5m"), }, }, + SecurityContext: &corev1.SecurityContext{ + ReadOnlyRootFilesystem: &readOnlyRootFilesystem, + Privileged: &privileged, + }, VolumeMounts: []corev1.VolumeMount{ { Name: "credentials", @@ -273,6 +287,7 @@ func (o ExternalDNSDeployment) Build() *appsv1.Deployment { deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, "--aws-zone-type=public", "--aws-batch-change-interval=10s", + "--aws-zones-cache-duration=1h", ) case "azure": deployment.Spec.Template.Spec.Containers[0].Args = append(deployment.Spec.Template.Spec.Containers[0].Args, @@ -329,6 +344,7 @@ type HyperShiftOperatorDeployment struct { MonitoringDashboards bool CertRotationScale time.Duration EnableCVOManagementClusterMetricsAccess bool + EnableDedicatedRequestServingIsolation bool } func (o HyperShiftOperatorDeployment) Build() *appsv1.Deployment { @@ -337,6 +353,7 @@ func (o HyperShiftOperatorDeployment) Build() *appsv1.Deployment { "--namespace=$(MY_NAMESPACE)", "--pod-name=$(MY_NAME)", "--metrics-addr=:9000", + 
fmt.Sprintf("--enable-dedicated-request-serving-isolation=%t", o.EnableDedicatedRequestServingIsolation), fmt.Sprintf("--enable-ocp-cluster-monitoring=%t", o.EnableOCPClusterMonitoring), fmt.Sprintf("--enable-ci-debug-output=%t", o.EnableCIDebugOutput), fmt.Sprintf("--private-platform=%s", o.PrivatePlatform), @@ -562,7 +579,9 @@ func (o HyperShiftOperatorDeployment) Build() *appsv1.Deployment { Command: []string{"/usr/bin/hypershift-operator"}, Args: []string{"init"}, SecurityContext: &corev1.SecurityContext{ - RunAsUser: k8sutilspointer.Int64(1000), + RunAsUser: k8sutilspointer.Int64(1000), + ReadOnlyRootFilesystem: &readOnlyRootFilesystem, + Privileged: &privileged, }, VolumeMounts: initVolumeMounts, }, @@ -582,6 +601,8 @@ func (o HyperShiftOperatorDeployment) Build() *appsv1.Deployment { SeccompProfile: &corev1.SeccompProfile{ Type: corev1.SeccompProfileTypeRuntimeDefault, }, + ReadOnlyRootFilesystem: &readOnlyRootFilesystem, + Privileged: &privileged, }, Image: image, ImagePullPolicy: corev1.PullIfNotPresent, @@ -645,7 +666,7 @@ func (o HyperShiftOperatorDeployment) Build() *appsv1.Deployment { if o.IncludeVersion { deployment.Annotations = map[string]string{ - "hypershift.openshift.io/install-cli-version": version.String(), + HyperShiftInstallCLIVersionAnnotation: version.String(), } } @@ -851,6 +872,11 @@ func (o HyperShiftOperatorClusterRole) Build() *rbacv1.ClusterRole { Resources: []string{"*"}, Verbs: []string{"*"}, }, + { + APIGroups: []string{"certificates.hypershift.openshift.io"}, + Resources: []string{rbacv1.ResourceAll}, + Verbs: []string{rbacv1.VerbAll}, + }, { APIGroups: []string{"config.openshift.io"}, Resources: []string{"*"}, @@ -930,6 +956,7 @@ func (o HyperShiftOperatorClusterRole) Build() *rbacv1.ClusterRole { "update", "patch", "delete", + "deletecollection", }, }, { @@ -1020,6 +1047,33 @@ func (o HyperShiftOperatorClusterRole) Build() *rbacv1.ClusterRole { Verbs: []string{"delete"}, ResourceNames: []string{hyperv1.GroupVersion.Group}, }, + 
{ + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"certificatesigningrequests"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"certificatesigningrequests/status"}, + Verbs: []string{"patch"}, + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"certificatesigningrequests/approval"}, + Verbs: []string{"update"}, + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"signers"}, + Verbs: []string{"approve"}, + // we can't specify a signer domain with ResourceNames (or even *): https://github.com/kubernetes/kubernetes/issues/122154 + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"signers"}, + Verbs: []string{"sign"}, + // we can't specify a signer domain with ResourceNames (or even *): https://github.com/kubernetes/kubernetes/issues/122154 + }, }, } if o.EnableCVOManagementClusterMetricsAccess { @@ -1130,9 +1184,10 @@ func HyperShiftControlPlanePriorityClass() *schedulingv1.PriorityClass { ObjectMeta: metav1.ObjectMeta{ Name: DefaultPriorityClass, }, - Value: 100000000, - GlobalDefault: false, - Description: "This priority class should be used for hypershift control plane pods not critical to serving the API.", + Value: 100000000, + GlobalDefault: false, + Description: "This priority class should be used for hypershift control plane pods not critical to serving the API.", + PreemptionPolicy: ptr.To(corev1.PreemptNever), } } @@ -1145,9 +1200,10 @@ func HyperShiftAPICriticalPriorityClass() *schedulingv1.PriorityClass { ObjectMeta: metav1.ObjectMeta{ Name: APICriticalPriorityClass, }, - Value: 100001000, - GlobalDefault: false, - Description: "This priority class should be used for hypershift control plane pods critical to serving the API.", + Value: 100001000, + GlobalDefault: false, + Description: "This priority class should be used for hypershift control plane pods critical to serving the API.", + 
PreemptionPolicy: ptr.To(corev1.PreemptNever), } } @@ -1160,9 +1216,10 @@ func HyperShiftEtcdPriorityClass() *schedulingv1.PriorityClass { ObjectMeta: metav1.ObjectMeta{ Name: EtcdPriorityClass, }, - Value: 100002000, - GlobalDefault: false, - Description: "This priority class should be used for hypershift etcd pods.", + Value: 100002000, + GlobalDefault: false, + Description: "This priority class should be used for hypershift etcd pods.", + PreemptionPolicy: ptr.To(corev1.PreemptNever), } } diff --git a/cmd/install/assets/hypershift_operator_test.go b/cmd/install/assets/hypershift_operator_test.go index a657dbd267..e4e55abc35 100644 --- a/cmd/install/assets/hypershift_operator_test.go +++ b/cmd/install/assets/hypershift_operator_test.go @@ -42,6 +42,7 @@ func TestHyperShiftOperatorDeployment_Build(t *testing.T) { "--namespace=$(MY_NAMESPACE)", "--pod-name=$(MY_NAME)", "--metrics-addr=:9000", + fmt.Sprintf("--enable-dedicated-request-serving-isolation=%t", false), fmt.Sprintf("--enable-ocp-cluster-monitoring=%t", false), fmt.Sprintf("--enable-ci-debug-output=%t", false), fmt.Sprintf("--private-platform=%s", string(hyperv1.NonePlatform)), @@ -91,6 +92,7 @@ func TestHyperShiftOperatorDeployment_Build(t *testing.T) { "--namespace=$(MY_NAMESPACE)", "--pod-name=$(MY_NAME)", "--metrics-addr=:9000", + fmt.Sprintf("--enable-dedicated-request-serving-isolation=%t", false), fmt.Sprintf("--enable-ocp-cluster-monitoring=%t", false), fmt.Sprintf("--enable-ci-debug-output=%t", false), fmt.Sprintf("--private-platform=%s", string(hyperv1.NonePlatform)), @@ -125,6 +127,7 @@ func TestHyperShiftOperatorDeployment_Build(t *testing.T) { "--namespace=$(MY_NAMESPACE)", "--pod-name=$(MY_NAME)", "--metrics-addr=:9000", + fmt.Sprintf("--enable-dedicated-request-serving-isolation=%t", false), fmt.Sprintf("--enable-ocp-cluster-monitoring=%t", false), fmt.Sprintf("--enable-ci-debug-output=%t", false), fmt.Sprintf("--private-platform=%s", string(hyperv1.NonePlatform)), @@ -185,6 +188,7 @@ func 
TestHyperShiftOperatorDeployment_Build(t *testing.T) { "--namespace=$(MY_NAMESPACE)", "--pod-name=$(MY_NAME)", "--metrics-addr=:9000", + fmt.Sprintf("--enable-dedicated-request-serving-isolation=%t", false), fmt.Sprintf("--enable-ocp-cluster-monitoring=%t", false), fmt.Sprintf("--enable-ci-debug-output=%t", false), fmt.Sprintf("--private-platform=%s", string(hyperv1.AWSPlatform)), @@ -240,6 +244,66 @@ func TestHyperShiftOperatorDeployment_Build(t *testing.T) { }, }, }, + "specify dedicated request serving isolation parameter (true) result in appropriate arguments": { + inputBuildParameters: HyperShiftOperatorDeployment{ + Namespace: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + }, + OperatorImage: testOperatorImage, + ServiceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hypershift", + }, + }, + Replicas: 3, + PrivatePlatform: string(hyperv1.NonePlatform), + EnableDedicatedRequestServingIsolation: true, + }, + expectedVolumeMounts: nil, + expectedVolumes: nil, + expectedArgs: []string{ + "run", + "--namespace=$(MY_NAMESPACE)", + "--pod-name=$(MY_NAME)", + "--metrics-addr=:9000", + fmt.Sprintf("--enable-dedicated-request-serving-isolation=%t", true), + fmt.Sprintf("--enable-ocp-cluster-monitoring=%t", false), + fmt.Sprintf("--enable-ci-debug-output=%t", false), + fmt.Sprintf("--private-platform=%s", string(hyperv1.NonePlatform)), + }, + }, + "specify dedicated request serving isolation parameter (false) result in appropriate arguments": { + inputBuildParameters: HyperShiftOperatorDeployment{ + Namespace: &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testNamespace, + }, + }, + OperatorImage: testOperatorImage, + ServiceAccount: &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hypershift", + }, + }, + Replicas: 3, + PrivatePlatform: string(hyperv1.NonePlatform), + EnableDedicatedRequestServingIsolation: false, + }, + expectedVolumeMounts: nil, + expectedVolumes: nil, + 
expectedArgs: []string{ + "run", + "--namespace=$(MY_NAMESPACE)", + "--pod-name=$(MY_NAME)", + "--metrics-addr=:9000", + fmt.Sprintf("--enable-dedicated-request-serving-isolation=%t", false), + fmt.Sprintf("--enable-ocp-cluster-monitoring=%t", false), + fmt.Sprintf("--enable-ci-debug-output=%t", false), + fmt.Sprintf("--private-platform=%s", string(hyperv1.NonePlatform)), + }, + }, } for name, test := range tests { t.Run(name, func(t *testing.T) { diff --git a/cmd/install/install.go b/cmd/install/install.go index f953e2df42..d0fe8773ae 100644 --- a/cmd/install/install.go +++ b/cmd/install/install.go @@ -93,6 +93,7 @@ type Options struct { SLOsAlerts bool MonitoringDashboards bool CertRotationScale time.Duration + EnableDedicatedRequestServingIsolation bool } func (o *Options) Validate() error { @@ -208,6 +209,7 @@ func NewCommand() *cobra.Command { cmd.PersistentFlags().BoolVar(&opts.SLOsAlerts, "slos-alerts", opts.SLOsAlerts, "If true, HyperShift will generate and use the prometheus alerts for monitoring HostedCluster and NodePools") cmd.PersistentFlags().BoolVar(&opts.MonitoringDashboards, "monitoring-dashboards", opts.MonitoringDashboards, "If true, HyperShift will generate a monitoring dashboard for every HostedCluster that it creates") cmd.PersistentFlags().DurationVar(&opts.CertRotationScale, "cert-rotation-scale", opts.CertRotationScale, "The scaling factor for certificate rotation. 
It is not supported to set this to anything other than 24h.") + cmd.PersistentFlags().BoolVar(&opts.EnableDedicatedRequestServingIsolation, "enable-dedicated-request-serving-isolation", true, "If true, enables scheduling of request serving components to dedicated nodes") cmd.RunE = func(cmd *cobra.Command, args []string) error { opts.ApplyDefaults() @@ -604,6 +606,7 @@ func hyperShiftOperatorManifests(opts Options) ([]crclient.Object, error) { MonitoringDashboards: opts.MonitoringDashboards, CertRotationScale: opts.CertRotationScale, EnableCVOManagementClusterMetricsAccess: opts.EnableCVOManagementClusterMetricsAccess, + EnableDedicatedRequestServingIsolation: opts.EnableDedicatedRequestServingIsolation, }.Build() objects = append(objects, operatorDeployment) diff --git a/cmd/nodepool/core/create.go b/cmd/nodepool/core/create.go index 1fd71bb0dc..278a6fde90 100644 --- a/cmd/nodepool/core/create.go +++ b/cmd/nodepool/core/create.go @@ -72,6 +72,26 @@ func (o *CreateNodePoolOptions) CreateNodePool(ctx context.Context, platformOpts releaseImage = hcluster.Spec.Release.Image } + // Set default upgrade type when the flag is empty + if o.NodeUpgradeType == "" { + switch hcluster.Spec.Platform.Type { + case hyperv1.AWSPlatform: + o.NodeUpgradeType = hyperv1.UpgradeTypeReplace + case hyperv1.KubevirtPlatform: + o.NodeUpgradeType = hyperv1.UpgradeTypeReplace + case hyperv1.NonePlatform: + o.NodeUpgradeType = hyperv1.UpgradeTypeInPlace + case hyperv1.AgentPlatform: + o.NodeUpgradeType = hyperv1.UpgradeTypeInPlace + case hyperv1.AzurePlatform: + o.NodeUpgradeType = hyperv1.UpgradeTypeReplace + case hyperv1.PowerVSPlatform: + o.NodeUpgradeType = hyperv1.UpgradeTypeReplace + default: + panic("Unsupported platform") + } + } + nodePool = &hyperv1.NodePool{ TypeMeta: metav1.TypeMeta{ Kind: "NodePool", diff --git a/cmd/version/version.go b/cmd/version/version.go index ec722b96d2..e33226f8fb 100644 --- a/cmd/version/version.go +++ b/cmd/version/version.go @@ -7,8 +7,12 @@ import ( 
"net/http" "strings" + "github.com/openshift/hypershift/cmd/util" + manifests "github.com/openshift/hypershift/hypershift-operator/controllers/manifests/supportedversion" + "github.com/openshift/hypershift/hypershift-operator/controllers/supportedversion" "github.com/openshift/hypershift/pkg/version" "github.com/spf13/cobra" + crclient "sigs.k8s.io/controller-runtime/pkg/client" ) var ( @@ -61,7 +65,8 @@ func LookupDefaultOCPVersion(releaseStream string) (OCPVersion, error) { } func NewVersionCommand() *cobra.Command { - var commitOnly bool + var commitOnly, clientOnly bool + namespace := "hypershift" cmd := &cobra.Command{ Use: "version", Short: "Prints HyperShift CLI version", @@ -71,9 +76,40 @@ func NewVersionCommand() *cobra.Command { fmt.Printf("%s\n", version.GetRevision()) return } - fmt.Printf("%s\n", version.String()) + fmt.Printf("Client Version: %s\n", version.String()) + if clientOnly { + return + } + client, err := util.GetClient() + if err != nil { + fmt.Printf("failed to connect to server: %v\n", err) + return + } + + supportedVersions := manifests.ConfigMap(namespace) + if err := client.Get(cmd.Context(), crclient.ObjectKeyFromObject(supportedVersions), supportedVersions); err != nil { + fmt.Printf("failed to find supported versions on the server: %v\n", err) + return + } + if serverVersion, present := supportedVersions.Data[supportedversion.ConfigMapServerVersionKey]; present { + fmt.Printf("Server Version: %s\n", serverVersion) + } else { + fmt.Println("The server did not advertise its HyperShift version.") + } + if supportedVersionData, present := supportedVersions.Data[supportedversion.ConfigMapVersionsKey]; present { + var versions supportedversion.SupportedVersions + if err := json.Unmarshal([]byte(supportedVersionData), &versions); err != nil { + fmt.Printf("failed to parse supported versions on the server: %v\n", err) + return + } + fmt.Printf("Server Supports OCP Versions: %s\n", strings.Join(versions.Versions, ", ")) + } else { + 
fmt.Println("The server did not advertise supported OCP versions.") + } }, } cmd.Flags().BoolVar(&commitOnly, "commit-only", commitOnly, "Output only the code commit") + cmd.Flags().BoolVar(&clientOnly, "client-only", clientOnly, "Output only the client version") + cmd.Flags().StringVar(&namespace, "namespace", namespace, "The namespace in which HyperShift is installed") return cmd } diff --git a/control-plane-operator/controllers/hostedcontrolplane/autoscaler/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/autoscaler/reconcile.go index 500f6c80e0..9d64884763 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/autoscaler/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/autoscaler/reconcile.go @@ -21,8 +21,27 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + autoscalerName = "cluster-autoscaler" +) + func ReconcileAutoscalerDeployment(deployment *appsv1.Deployment, hcp *hyperv1.HostedControlPlane, sa *corev1.ServiceAccount, kubeConfigSecret *corev1.Secret, options hyperv1.ClusterAutoscaling, clusterAutoscalerImage, availabilityProberImage string, setDefaultSecurityContext bool, ownerRef config.OwnerRef) error { ownerRef.ApplyTo(deployment) + + autoscalerResources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("60Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), + }, + } + // preserve existing resource requirements + mainContainer := util.FindContainer(autoscalerName, deployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + autoscalerResources = mainContainer.Resources + } + } + args := []string{ "--cloud-provider=clusterapi", "--node-group-auto-discovery=clusterapi:namespace=$(MY_NAMESPACE)", @@ -69,17 +88,16 @@ func ReconcileAutoscalerDeployment(deployment *appsv1.Deployment, hcp *hyperv1.H } labels := 
map[string]string{ - "app": "cluster-autoscaler", - hyperv1.ControlPlaneComponent: "cluster-autoscaler", + "app": autoscalerName, + hyperv1.ControlPlaneComponent: autoscalerName, } // The selector needs to be invariant for the lifecycle of the project as it's an immutable field, // otherwise changing would prevent an upgrade from happening. selector := map[string]string{ - "app": "cluster-autoscaler", + "app": autoscalerName, } deployment.Spec = appsv1.DeploymentSpec{ - Replicas: k8sutilspointer.Int32(1), Selector: &metav1.LabelSelector{ MatchLabels: selector, }, @@ -116,7 +134,7 @@ func ReconcileAutoscalerDeployment(deployment *appsv1.Deployment, hcp *hyperv1.H }, Containers: []corev1.Container{ { - Name: "cluster-autoscaler", + Name: autoscalerName, Image: clusterAutoscalerImage, ImagePullPolicy: corev1.PullIfNotPresent, VolumeMounts: []corev1.VolumeMount{ @@ -135,14 +153,9 @@ func ReconcileAutoscalerDeployment(deployment *appsv1.Deployment, hcp *hyperv1.H }, }, }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("60Mi"), - corev1.ResourceCPU: resource.MustParse("10m"), - }, - }, - Command: []string{"/usr/bin/cluster-autoscaler"}, - Args: args, + Resources: autoscalerResources, + Command: []string{"/usr/bin/cluster-autoscaler"}, + Args: args, LivenessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ -178,7 +191,7 @@ func ReconcileAutoscalerDeployment(deployment *appsv1.Deployment, hcp *hyperv1.H }, } - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec) + util.AvailabilityProber(kas.InClusterKASReadyURL(hcp.Spec.Platform.Type), availabilityProberImage, &deployment.Spec.Template.Spec) deploymentConfig := config.DeploymentConfig{ AdditionalLabels: map[string]string{ @@ -193,7 +206,11 @@ func ReconcileAutoscalerDeployment(deployment *appsv1.Deployment, hcp *hyperv1.H 
deploymentConfig.Scheduling.PriorityClass = hcp.Annotations[hyperv1.ControlPlanePriorityClass] } - deploymentConfig.SetDefaults(hcp, nil, k8sutilspointer.Int(1)) + replicas := k8sutilspointer.Int(1) + if _, exists := hcp.Annotations[hyperv1.DisableClusterAutoscalerAnnotation]; exists { + replicas = k8sutilspointer.Int(0) + } + deploymentConfig.SetDefaults(hcp, nil, replicas) deploymentConfig.SetRestartAnnotation(hcp.ObjectMeta) deploymentConfig.ApplyTo(deployment) diff --git a/control-plane-operator/controllers/hostedcontrolplane/cco/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/cco/reconcile.go new file mode 100644 index 0000000000..60de807bc8 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/cco/reconcile.go @@ -0,0 +1,171 @@ +package cco + +import ( + "path" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/pointer" + + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/imageprovider" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/proxy" + "github.com/openshift/hypershift/support/util" +) + +const ( + WorkerNamespace = "openshift-cloud-credential-operator" + WorkerServiceAccount = "cloud-credential-operator" +) + +func selectorLabels() map[string]string { + return map[string]string{ + "app": "cloud-credential-operator", + hyperv1.ControlPlaneComponent: "cloud-credential-operator", + } +} + +var ( + volumeMounts = util.PodVolumeMounts{ + containerMain().Name: { + volumeServiceAccountKubeconfig().Name: "/etc/kubernetes", + 
}, + } +) + +type Params struct { + operatorImage string + kubeRbacProxyImage string + availabilityProberImage string + + deploymentConfig config.DeploymentConfig + releaseVersion string + issuerURL string + apiPort *int32 + + config.OwnerRef +} + +func NewParams(hcp *hyperv1.HostedControlPlane, version string, releaseImageProvider *imageprovider.ReleaseImageProvider, setDefaultSecurityContext bool) Params { + params := Params{ + operatorImage: releaseImageProvider.GetImage("cloud-credential-operator"), + kubeRbacProxyImage: releaseImageProvider.GetImage("kube-rbac-proxy"), + availabilityProberImage: releaseImageProvider.GetImage(util.AvailabilityProberImageName), + releaseVersion: version, + issuerURL: hcp.Spec.IssuerURL, + OwnerRef: config.OwnerRefFrom(hcp), + apiPort: pointer.Int32(util.KASPodPort(hcp)), + deploymentConfig: config.DeploymentConfig{ + Scheduling: config.Scheduling{ + PriorityClass: config.DefaultPriorityClass, + }, + SetDefaultSecurityContext: setDefaultSecurityContext, + Resources: config.ResourcesSpec{ + containerMain().Name: { + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("75Mi"), + }, + }, + }, + }, + } + params.deploymentConfig.SetRestartAnnotation(hcp.ObjectMeta) + if hcp.Annotations[hyperv1.ControlPlanePriorityClass] != "" { + params.deploymentConfig.Scheduling.PriorityClass = hcp.Annotations[hyperv1.ControlPlanePriorityClass] + } + params.deploymentConfig.SetDefaults(hcp, selectorLabels(), pointer.Int(1)) + params.deploymentConfig.SetReleaseImageAnnotation(hcp.Spec.ReleaseImage) + return params +} + +func ReconcileDeployment(deployment *appsv1.Deployment, params Params, platformType hyperv1.PlatformType) error { + params.OwnerRef.ApplyTo(deployment) + deployment.Spec = appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: selectorLabels(), + }, + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + }, + 
Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: selectorLabels(), + }, + Spec: corev1.PodSpec{ + AutomountServiceAccountToken: pointer.Bool(false), + Containers: []corev1.Container{ + util.BuildContainer(containerMain(), buildMainContainer(params.operatorImage, params.releaseVersion)), + }, + Volumes: []corev1.Volume{ + util.BuildVolume(volumeServiceAccountKubeconfig(), buildVolumeServiceAccountKubeconfig), + }, + }, + }, + } + + params.deploymentConfig.ApplyTo(deployment) + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), params.availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { + o.KubeconfigVolumeName = volumeServiceAccountKubeconfig().Name + o.WaitForInfrastructureResource = true + o.RequiredAPIs = []schema.GroupVersionKind{ + {Group: "operator.openshift.io", Version: "v1", Kind: "CloudCredential"}, + } + }) + return nil +} + +func containerMain() *corev1.Container { + return &corev1.Container{ + Name: "cloud-credential-operator", + } +} + +func buildMainContainer(image, releaseVersion string) func(*corev1.Container) { + return func(c *corev1.Container) { + c.Image = image + c.Command = []string{ + "/usr/bin/cloud-credential-operator", + } + c.Args = []string{ + "operator", + "--kubeconfig=" + path.Join(volumeMounts.Path(containerMain().Name, volumeServiceAccountKubeconfig().Name), util.KubeconfigKey), + } + c.Env = []corev1.EnvVar{ + { + Name: "RELEASE_VERSION", + Value: releaseVersion, + }, + { + Name: "KUBECONFIG", + Value: path.Join(volumeMounts.Path(containerMain().Name, volumeServiceAccountKubeconfig().Name), util.KubeconfigKey), + }, + } + proxy.SetEnvVars(&c.Env) + c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) + c.TerminationMessagePolicy = corev1.TerminationMessageFallbackToLogsOnError + c.SecurityContext = &corev1.SecurityContext{ + Capabilities: &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}}, + AllowPrivilegeEscalation: pointer.Bool(false), + } + 
} +} + +func volumeServiceAccountKubeconfig() *corev1.Volume { + return &corev1.Volume{ + Name: "service-account-kubeconfig", + } +} + +func buildVolumeServiceAccountKubeconfig(v *corev1.Volume) { + v.Secret = &corev1.SecretVolumeSource{ + SecretName: manifests.CloudCredentialOperatorKubeconfig("").Name, + DefaultMode: pointer.Int32(0640), + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/cco/reconcile_test.go b/control-plane-operator/controllers/hostedcontrolplane/cco/reconcile_test.go new file mode 100644 index 0000000000..82bbdab5ca --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/cco/reconcile_test.go @@ -0,0 +1,48 @@ +package cco + +import ( + "testing" + + "github.com/openshift/hypershift/api" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/imageprovider" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/support/testutil" + "github.com/openshift/hypershift/support/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/pointer" +) + +func TestReconcileDeployment(t *testing.T) { + hcp := &hyperv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Namespace: "test-namespace", + }, + Spec: hyperv1.HostedControlPlaneSpec{ + ReleaseImage: "quay.io/ocp-dev/test-release-image:latest", + IssuerURL: "https://www.example.com", + Networking: hyperv1.ClusterNetworking{ + APIServer: &hyperv1.APIServerNetworking{ + Port: pointer.Int32(1234), + }, + }, + }, + } + images := map[string]string{ + "cloud-credential-operator": "quay.io/openshift/cloud-credential-operator:latest", + "token-minter": "quay.io/openshift/token-minter:latest", + "availability-prober": "quay.io/openshift/availability-prober:latest", + } + deployment := manifests.CloudCredentialOperatorDeployment("test-namespace") + imageProvider := 
imageprovider.NewFromImages(images) + params := NewParams(hcp, "1.0.0", imageProvider, true) + if err := ReconcileDeployment(deployment, params, hcp.Spec.Platform.Type); err != nil { + t.Fatalf("unexpected error: %v", err) + } + deploymentYaml, err := util.SerializeResource(deployment, api.Scheme) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + testutil.CompareWithFixture(t, deploymentYaml) +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/cco/testdata/zz_fixture_TestReconcileDeployment.yaml b/control-plane-operator/controllers/hostedcontrolplane/cco/testdata/zz_fixture_TestReconcileDeployment.yaml new file mode 100644 index 0000000000..f8ce7151f9 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/cco/testdata/zz_fixture_TestReconcileDeployment.yaml @@ -0,0 +1,120 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/managed-by: control-plane-operator + name: cloud-credential-operator + namespace: test-namespace + ownerReferences: + - apiVersion: hypershift.openshift.io/v1beta1 + blockOwnerDeletion: true + controller: true + kind: HostedControlPlane + name: test + uid: "" +spec: + replicas: 1 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: cloud-credential-operator + hypershift.openshift.io/control-plane-component: cloud-credential-operator + strategy: + type: Recreate + template: + metadata: + annotations: + hypershift.openshift.io/release-image: quay.io/ocp-dev/test-release-image:latest + creationTimestamp: null + labels: + app: cloud-credential-operator + hypershift.openshift.io/control-plane-component: cloud-credential-operator + hypershift.openshift.io/hosted-control-plane: test-namespace + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: hypershift.openshift.io/control-plane + operator: In + values: + - "true" + weight: 50 + - preference: + 
matchExpressions: + - key: hypershift.openshift.io/cluster + operator: In + values: + - test-namespace + weight: 100 + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + hypershift.openshift.io/hosted-control-plane: test-namespace + topologyKey: kubernetes.io/hostname + weight: 100 + automountServiceAccountToken: false + containers: + - args: + - operator + - --kubeconfig=/etc/kubernetes/kubeconfig + command: + - /usr/bin/cloud-credential-operator + env: + - name: RELEASE_VERSION + value: 1.0.0 + - name: KUBECONFIG + value: /etc/kubernetes/kubeconfig + image: quay.io/openshift/cloud-credential-operator:latest + name: cloud-credential-operator + resources: + requests: + cpu: 10m + memory: 75Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/kubernetes + name: service-account-kubeconfig + initContainers: + - command: + - /usr/bin/control-plane-operator + - availability-prober + - --target + - https://kube-apiserver:6443/readyz + - --kubeconfig=/var/kubeconfig/kubeconfig + - --required-api=operator.openshift.io,v1,CloudCredential + - --wait-for-infrastructure-resource + image: quay.io/openshift/availability-prober:latest + imagePullPolicy: IfNotPresent + name: availability-prober + resources: {} + volumeMounts: + - mountPath: /var/kubeconfig + name: service-account-kubeconfig + priorityClassName: hypershift-control-plane + securityContext: + runAsUser: 1001 + tolerations: + - effect: NoSchedule + key: hypershift.openshift.io/control-plane + operator: Equal + value: "true" + - effect: NoSchedule + key: hypershift.openshift.io/cluster + operator: Equal + value: test-namespace + volumes: + - name: service-account-kubeconfig + secret: + defaultMode: 416 + secretName: cloud-credential-operator-kubeconfig +status: {} diff --git 
a/control-plane-operator/controllers/hostedcontrolplane/cco/testdata/zz_fixture_TestReconcilePodMonitor.yaml b/control-plane-operator/controllers/hostedcontrolplane/cco/testdata/zz_fixture_TestReconcilePodMonitor.yaml new file mode 100644 index 0000000000..1a8c8aac2f --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/cco/testdata/zz_fixture_TestReconcilePodMonitor.yaml @@ -0,0 +1,47 @@ +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + creationTimestamp: null + name: cluster-image-registry-operator + namespace: test-namespace + ownerReferences: + - apiVersion: hypershift.openshift.io/v1beta1 + blockOwnerDeletion: true + controller: true + kind: HostedControlPlane + name: test + uid: "" +spec: + namespaceSelector: + matchNames: + - test-namespace + podMetricsEndpoints: + - bearerTokenSecret: + key: "" + interval: 60s + metricRelabelings: + - action: drop + regex: (.*) + sourceLabels: + - __name__ + - action: replace + replacement: the-cluster-id + targetLabel: _id + path: /metrics + port: metrics + relabelings: + - action: replace + replacement: the-cluster-id + targetLabel: _id + scheme: https + tlsConfig: + ca: + configMap: + key: ca.crt + name: root-ca + cert: {} + serverName: cloud-credential-operator-metrics + selector: + matchLabels: + app: cloud-credential-operator + hypershift.openshift.io/control-plane-component: cloud-credential-operator diff --git a/control-plane-operator/controllers/hostedcontrolplane/cloud/aws/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/cloud/aws/reconcile.go index 418a484e9b..73322c844e 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/cloud/aws/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/cloud/aws/reconcile.go @@ -8,6 +8,7 @@ import ( "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" 
"github.com/openshift/hypershift/hypershift-operator/controllers/manifests/controlplaneoperator" "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/proxy" "github.com/openshift/hypershift/support/util" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -97,6 +98,7 @@ func buildCCMContainer(controllerManagerImage string) func(c *corev1.Container) "--authorization-kubeconfig=/etc/kubernetes/kubeconfig/kubeconfig", } c.VolumeMounts = podVolumeMounts().ContainerMounts(c.Name) + proxy.SetEnvVars(&c.Env) } } diff --git a/control-plane-operator/controllers/hostedcontrolplane/cloud/azure/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/cloud/azure/reconcile.go index c9e4d3a0b0..69c33f0737 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/cloud/azure/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/cloud/azure/reconcile.go @@ -8,6 +8,7 @@ import ( "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" "github.com/openshift/hypershift/hypershift-operator/controllers/manifests/controlplaneoperator" "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/proxy" "github.com/openshift/hypershift/support/util" appsv1 "k8s.io/api/apps/v1" @@ -93,6 +94,7 @@ func buildCCMContainer(p *AzureParams, controllerManagerImage, namespace string) "--v=4", } c.VolumeMounts = podVolumeMounts().ContainerMounts(c.Name) + proxy.SetEnvVars(&c.Env) } } diff --git a/control-plane-operator/controllers/hostedcontrolplane/cloud/kubevirt/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/cloud/kubevirt/reconcile.go index 6fe63c7a73..08067e2231 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/cloud/kubevirt/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/cloud/kubevirt/reconcile.go @@ -106,13 +106,11 @@ func ReconcileDeployment(deployment *appsv1.Deployment, 
hcp *hyperv1.HostedContr isExternalInfra = true } deploymentConfig := newDeploymentConfig() + deploymentConfig.SetDefaults(hcp, nil, nil) deployment.Spec = appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: ccmLabels(), }, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RecreateDeploymentStrategyType, - }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: ccmLabels(), diff --git a/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/config.go b/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/config.go index 87ee9d06b5..7381471591 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/config.go +++ b/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/config.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "path" + "strings" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -67,5 +68,24 @@ func reconcileConfig(cfg *openshiftcpv1.OpenShiftControllerManagerConfig, minTLS CipherSuites: cipherSuites, }, } + + // disables automatically setting the `pod-security.kubernetes.io/enforce` label on namespaces by the pod-security-admission-label-synchronization-controller + // see https://github.com/openshift/cluster-policy-controller/blob/50c2a8337f08856bbae4cd419bb8ffcbdf92567c/pkg/cmd/controller/psalabelsyncer.go#L19 + index := -1 + for i := range cfg.FeatureGates { + fg := cfg.FeatureGates[i] + if strings.HasPrefix(fg, "OpenShiftPodSecurityAdmission") { + index = i + break + } + } + + if index != -1 { + // overwrite + cfg.FeatureGates[index] = "OpenShiftPodSecurityAdmission=false" + } else { + cfg.FeatureGates = append(cfg.FeatureGates, "OpenShiftPodSecurityAdmission=false") + } + return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/deployment.go index 415cb5697b..98595d0973 100644 --- 
a/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/deployment.go +++ b/control-plane-operator/controllers/hostedcontrolplane/clusterpolicy/deployment.go @@ -32,7 +32,7 @@ var ( } ) -func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, image string, deploymentConfig config.DeploymentConfig, availabilityProberImage string) error { +func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, image string, deploymentConfig config.DeploymentConfig, availabilityProberImage string, platformType hyperv1.PlatformType) error { // preserve existing resource requirements for main CPC container mainContainer := util.FindContainer(cpcContainerMain().Name, deployment.Spec.Template.Spec.Containers) if mainContainer != nil { @@ -64,7 +64,7 @@ func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef deployment.Spec.Template.Spec.AutomountServiceAccountToken = pointer.Bool(false) deploymentConfig.ApplyTo(deployment) - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec) + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), availabilityProberImage, &deployment.Spec.Template.Spec) return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator.go b/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator.go index 48d9ad47cf..e53e6f3845 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator.go +++ b/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator.go @@ -30,7 +30,10 @@ import ( utilpointer "k8s.io/utils/pointer" ) -const operatorName = "cluster-network-operator" +const ( + operatorName = "cluster-network-operator" + konnectivityProxyName = "konnectivity-proxy" +) type Images struct { NetworkOperator string @@ -125,7 +128,7 @@ func NewParams(hcp *hyperv1.HostedControlPlane, version string, 
releaseImageProv p.DeploymentConfig.SetDefaultSecurityContext = setDefaultSecurityContext if util.IsPrivateHCP(hcp) { p.APIServerAddress = fmt.Sprintf("api.%s.hypershift.local", hcp.Name) - p.APIServerPort = 443 + p.APIServerPort = util.APIPortForLocalZone(util.IsLBKAS(hcp)) } else { p.APIServerAddress = hcp.Status.ControlPlaneEndpoint.Host p.APIServerPort = hcp.Status.ControlPlaneEndpoint.Port @@ -136,7 +139,8 @@ func NewParams(hcp *hyperv1.HostedControlPlane, version string, releaseImageProv func ReconcileRole(role *rbacv1.Role, ownerRef config.OwnerRef, networkType hyperv1.NetworkType) error { ownerRef.ApplyTo(role) - if networkType == hyperv1.Calico { + // The RBAC below is required when the networkType is not OVNKubernetes https://issues.redhat.com/browse/OCPBUGS-26977 + if networkType != hyperv1.OVNKubernetes { role.Rules = []rbacv1.PolicyRule{ { APIGroups: []string{corev1.SchemeGroupVersion.Group}, @@ -161,6 +165,9 @@ func ReconcileRole(role *rbacv1.Role, ownerRef config.OwnerRef, networkType hype "ovnkube-identity-cm", }, Verbs: []string{ + "list", + "get", + "watch", "create", "patch", "update", @@ -290,9 +297,37 @@ func ReconcileServiceAccount(sa *corev1.ServiceAccount, ownerRef config.OwnerRef return nil } -func ReconcileDeployment(dep *appsv1.Deployment, params Params) error { +func ReconcileDeployment(dep *appsv1.Deployment, params Params, platformType hyperv1.PlatformType) error { params.OwnerRef.ApplyTo(dep) + cnoResources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("100Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), + }, + } + // preserve existing resource requirements for the CNO container + mainContainer := util.FindContainer(operatorName, dep.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + cnoResources = mainContainer.Resources + } + } + + kProxyResources := 
corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("10Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), + }, + } + // preserve existing resource requirements for the konnectivity-proxy container + kProxyContainer := util.FindContainer(konnectivityProxyName, dep.Spec.Template.Spec.Containers) + if kProxyContainer != nil { + if len(kProxyContainer.Resources.Requests) > 0 || len(kProxyContainer.Resources.Limits) > 0 { + kProxyResources = kProxyContainer.Resources + } + } + dep.Spec.Replicas = utilpointer.Int32(1) dep.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": operatorName}} dep.Spec.Strategy.Type = appsv1.RecreateDeploymentStrategyType @@ -511,13 +546,10 @@ if [[ -n $sc ]]; then kubectl --kubeconfig $kc delete --ignore-not-found validat {Name: "SOCKS5_PROXY_IMAGE", Value: params.Images.Socks5Proxy}, {Name: "OPENSHIFT_RELEASE_IMAGE", Value: params.DeploymentConfig.AdditionalAnnotations[hyperv1.ReleaseImageAnnotation]}, }...), - Name: operatorName, - Image: params.Images.NetworkOperator, - ImagePullPolicy: corev1.PullIfNotPresent, - Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("10m"), - corev1.ResourceMemory: resource.MustParse("100Mi"), - }}, + Name: operatorName, + Image: params.Images.NetworkOperator, + ImagePullPolicy: corev1.PullIfNotPresent, + Resources: cnoResources, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, VolumeMounts: []corev1.VolumeMount{ {Name: "hosted-etc-kube", MountPath: "/etc/hosted-kubernetes"}, @@ -526,20 +558,16 @@ if [[ -n $sc ]]; then kubectl --kubeconfig $kc delete --ignore-not-found validat }, { // CNO uses konnectivity-proxy to perform proxy readiness checks through the hosted cluster's network - Name: "konnectivity-proxy", + // Disable the resolver to ensure that CNO connects to the exact proxy address provided + Name: konnectivityProxyName, Image: 
params.Images.Socks5Proxy, - Command: []string{"/usr/bin/control-plane-operator", "konnectivity-socks5-proxy", "--resolve-from-guest-cluster-dns=true"}, + Command: []string{"/usr/bin/control-plane-operator", "konnectivity-socks5-proxy", "--disable-resolver"}, Args: []string{"run"}, Env: []corev1.EnvVar{{ Name: "KUBECONFIG", Value: "/etc/kubernetes/kubeconfig", }}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("10m"), - corev1.ResourceMemory: resource.MustParse("10Mi"), - }, - }, + Resources: kProxyResources, VolumeMounts: []corev1.VolumeMount{ {Name: "hosted-etc-kube", MountPath: "/etc/kubernetes"}, {Name: "konnectivity-proxy-cert", MountPath: "/etc/konnectivity/proxy-client"}, @@ -555,7 +583,7 @@ if [[ -n $sc ]]; then kubectl --kubeconfig $kc delete --ignore-not-found validat } params.DeploymentConfig.ApplyTo(dep) - util.AvailabilityProber(kas.InClusterKASReadyURL(), params.AvailabilityProberImage, &dep.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), params.AvailabilityProberImage, &dep.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { o.KubeconfigVolumeName = "hosted-etc-kube" o.RequiredAPIs = []schema.GroupVersionKind{ {Group: "operator.openshift.io", Version: "v1", Kind: "Network"}, diff --git a/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator_test.go b/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator_test.go index e13e2d60c1..cbf5c76b4c 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator_test.go +++ b/control-plane-operator/controllers/hostedcontrolplane/cno/clusternetworkoperator_test.go @@ -4,7 +4,17 @@ import ( "strconv" "testing" + . 
"github.com/onsi/gomega" + routev1 "github.com/openshift/api/route/v1" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + hyperapi "github.com/openshift/hypershift/support/api" + "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/testutil" + "github.com/openshift/hypershift/support/util" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" ) func TestReconcileDeployment(t *testing.T) { @@ -12,6 +22,7 @@ func TestReconcileDeployment(t *testing.T) { name string params Params expectProxyAPIServerAddress bool + cnoResources *corev1.ResourceRequirements }{ { name: "No private apiserver connectivity, proxy apiserver address is set", @@ -21,6 +32,20 @@ func TestReconcileDeployment(t *testing.T) { name: "Private apiserver connectivity, proxy apiserver address is unset", params: Params{IsPrivate: true}, }, + { + name: "Preserve existing resources", + expectProxyAPIServerAddress: true, + cnoResources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000m"), + corev1.ResourceMemory: resource.MustParse("1000Mi"), + }, + }, + }, } for _, tc := range tcs { @@ -30,7 +55,17 @@ func TestReconcileDeployment(t *testing.T) { } dep := &appsv1.Deployment{} - if err := ReconcileDeployment(dep, tc.params); err != nil { + + if tc.cnoResources != nil { + dep.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: operatorName, + Resources: *tc.cnoResources, + }, + } + } + + if err := ReconcileDeployment(dep, tc.params, hyperv1.NonePlatform); err != nil { t.Fatalf("ReconcileDeployment: %v", err) } @@ -47,6 +82,206 @@ func TestReconcileDeployment(t *testing.T) { strconv.FormatBool(tc.expectProxyAPIServerAddress), strconv.FormatBool(hasProxyAPIServerAddress)) 
} + + deploymentYaml, err := util.SerializeResource(dep, hyperapi.Scheme) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + testutil.CompareWithFixture(t, deploymentYaml) + + }) + } +} + +func TestReconcileRole(t *testing.T) { + type args struct { + role *rbacv1.Role + ownerRef config.OwnerRef + networkType hyperv1.NetworkType + } + tests := []struct { + name string + args args + }{ + { + name: "Network type OVNKubernetes", + args: args{ + role: &rbacv1.Role{}, + ownerRef: config.OwnerRef{}, + networkType: hyperv1.OVNKubernetes, + }, + }, + { + name: "Network type OpenShiftSDN", + args: args{ + role: &rbacv1.Role{}, + ownerRef: config.OwnerRef{}, + networkType: hyperv1.OpenShiftSDN, + }, + }, + { + name: "Network type Calico", + args: args{ + role: &rbacv1.Role{}, + ownerRef: config.OwnerRef{}, + networkType: hyperv1.Calico, + }, + }, + { + name: "Network type Other", + args: args{ + role: &rbacv1.Role{}, + ownerRef: config.OwnerRef{}, + networkType: hyperv1.Other, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + + g := NewGomegaWithT(t) + err := ReconcileRole(tt.args.role, tt.args.ownerRef, tt.args.networkType) + g.Expect(err).To(BeNil()) + g.Expect(tt.args.role.Rules).To(BeEquivalentTo(expectedRules(tt.args.networkType))) }) } } + +func expectedRules(networkType hyperv1.NetworkType) []rbacv1.PolicyRule { + + ovnRules := []rbacv1.PolicyRule{ + { + APIGroups: []string{corev1.SchemeGroupVersion.Group}, + Resources: []string{ + "events", + "configmaps", + "pods", + "secrets", + "services", + }, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"policy"}, + Resources: []string{"poddisruptionbudgets"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{appsv1.SchemeGroupVersion.Group}, + Resources: []string{"statefulsets", "deployments"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{routev1.SchemeGroupVersion.Group}, + Resources: []string{"routes", "routes/custom-host"}, + Verbs: []string{"*"}, + }, + { 
+ APIGroups: []string{"monitoring.coreos.com", "monitoring.rhobs"}, + Resources: []string{ + "servicemonitors", + "prometheusrules", + }, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{hyperv1.GroupVersion.Group}, + Resources: []string{ + "hostedcontrolplanes", + }, + Verbs: []string{ + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{hyperv1.GroupVersion.Group}, + Resources: []string{ + "hostedcontrolplanes/status", + }, + Verbs: []string{"*"}, + }, + } + + otherNetworkRules := []rbacv1.PolicyRule{ + { + APIGroups: []string{corev1.SchemeGroupVersion.Group}, + Resources: []string{ + "configmaps", + }, + ResourceNames: []string{ + "openshift-service-ca.crt", + }, + Verbs: []string{ + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{corev1.SchemeGroupVersion.Group}, + Resources: []string{ + "configmaps", + }, + ResourceNames: []string{ + "ovnkube-identity-cm", + }, + Verbs: []string{ + "list", + "get", + "watch", + "create", + "patch", + "update", + }, + }, + { + APIGroups: []string{appsv1.SchemeGroupVersion.Group}, + Resources: []string{"statefulsets", "deployments"}, + Verbs: []string{"list", "watch"}, + }, + { + APIGroups: []string{appsv1.SchemeGroupVersion.Group}, + Resources: []string{"deployments"}, + ResourceNames: []string{ + "multus-admission-controller", + "network-node-identity", + }, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{corev1.SchemeGroupVersion.Group}, + Resources: []string{"services"}, + ResourceNames: []string{ + "multus-admission-controller", + "network-node-identity", + }, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{hyperv1.GroupVersion.Group}, + Resources: []string{ + "hostedcontrolplanes", + }, + Verbs: []string{ + "get", + "list", + "watch", + }, + }, + { + APIGroups: []string{hyperv1.GroupVersion.Group}, + Resources: []string{ + "hostedcontrolplanes/status", + }, + Verbs: []string{"*"}, + }, + } + + if networkType != hyperv1.OVNKubernetes { + return otherNetworkRules + } + + 
return ovnRules +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_No_private_apiserver_connectivity__proxy_apiserver_address_is_set.yaml b/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_No_private_apiserver_connectivity__proxy_apiserver_address_is_set.yaml new file mode 100644 index 0000000000..46d8bae5ff --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_No_private_apiserver_connectivity__proxy_apiserver_address_is_set.yaml @@ -0,0 +1,198 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/managed-by: control-plane-operator +spec: + replicas: 0 + revisionHistoryLimit: 0 + selector: + matchLabels: + name: cluster-network-operator + strategy: + type: Recreate + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: configs + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + creationTimestamp: null + labels: + app: cluster-network-operator + hypershift.openshift.io/control-plane-component: cluster-network-operator + name: cluster-network-operator + spec: + containers: + - args: + - start + - --listen=0.0.0.0:9104 + - --kubeconfig=/etc/hosted-kubernetes/kubeconfig + - --namespace=openshift-network-operator + - --extra-clusters=management=/configs/management + command: + - /usr/bin/cluster-network-operator + env: + - name: OVN_SBDB_ROUTE_HOST + - name: PROXY_INTERNAL_APISERVER_ADDRESS + value: "true" + - name: HYPERSHIFT + value: "true" + - name: HOSTED_CLUSTER_NAME + - name: TOKEN_AUDIENCE + - name: RELEASE_VERSION + value: 4.11.0 + - name: APISERVER_OVERRIDE_HOST + - name: APISERVER_OVERRIDE_PORT + value: "0" + - name: OVN_NB_RAFT_ELECTION_TIMER + value: "10" + - name: OVN_SB_RAFT_ELECTION_TIMER + value: "16" + - name: 
OVN_NORTHD_PROBE_INTERVAL + value: "5000" + - name: OVN_CONTROLLER_INACTIVITY_PROBE + value: "180000" + - name: OVN_NB_INACTIVITY_PROBE + value: "60000" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HOSTED_CLUSTER_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDN_IMAGE + - name: KUBE_PROXY_IMAGE + - name: KUBE_RBAC_PROXY_IMAGE + - name: MULTUS_IMAGE + - name: MULTUS_ADMISSION_CONTROLLER_IMAGE + - name: CNI_PLUGINS_IMAGE + - name: BOND_CNI_PLUGIN_IMAGE + - name: WHEREABOUTS_CNI_IMAGE + - name: ROUTE_OVERRRIDE_CNI_IMAGE + - name: MULTUS_NETWORKPOLICY_IMAGE + - name: OVN_IMAGE + - name: OVN_CONTROL_PLANE_IMAGE + - name: EGRESS_ROUTER_CNI_IMAGE + - name: KURYR_DAEMON_IMAGE + - name: KURYR_CONTROLLER_IMAGE + - name: NETWORK_METRICS_DAEMON_IMAGE + - name: NETWORK_CHECK_SOURCE_IMAGE + - name: NETWORK_CHECK_TARGET_IMAGE + - name: CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE + - name: TOKEN_MINTER_IMAGE + - name: CLI_IMAGE + - name: SOCKS5_PROXY_IMAGE + - name: OPENSHIFT_RELEASE_IMAGE + imagePullPolicy: IfNotPresent + name: cluster-network-operator + resources: + requests: + cpu: 10m + memory: 100Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - mountPath: /configs + name: configs + - args: + - run + command: + - /usr/bin/control-plane-operator + - konnectivity-socks5-proxy + - --disable-resolver + env: + - name: KUBECONFIG + value: /etc/kubernetes/kubeconfig + name: konnectivity-proxy + resources: + requests: + cpu: 10m + memory: 10Mi + volumeMounts: + - mountPath: /etc/kubernetes + name: hosted-etc-kube + - mountPath: /etc/konnectivity/proxy-client + name: konnectivity-proxy-cert + - mountPath: /etc/konnectivity/proxy-ca + name: konnectivity-proxy-ca + initContainers: + - command: + - /usr/bin/control-plane-operator + - availability-prober + - --target + - https://kube-apiserver:6443/readyz + - 
--kubeconfig=/var/kubeconfig/kubeconfig + - --required-api=operator.openshift.io,v1,Network + - --required-api=network.operator.openshift.io,v1,EgressRouter + - --required-api=network.operator.openshift.io,v1,OperatorPKI + - --wait-for-infrastructure-resource + imagePullPolicy: IfNotPresent + name: availability-prober + resources: {} + volumeMounts: + - mountPath: /var/kubeconfig + name: hosted-etc-kube + - args: + - --kubeconfig=/etc/hosted-kubernetes/kubeconfig + - -n=openshift-network-operator + - delete + - --ignore-not-found=true + - deployment + - network-operator + command: + - /usr/bin/kubectl + name: remove-old-cno + resources: + requests: + cpu: 10m + memory: 50Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - args: + - -c + - |2- + + set -xeuo pipefail + kc=/configs/management + kubectl --kubeconfig $kc config set clusters.default.server "https://[${KUBERNETES_SERVICE_HOST}]:${KUBERNETES_SERVICE_PORT}" + kubectl --kubeconfig $kc config set clusters.default.certificate-authority /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + kubectl --kubeconfig $kc config set users.admin.tokenFile /var/run/secrets/kubernetes.io/serviceaccount/token + kubectl --kubeconfig $kc config set contexts.default.cluster default + kubectl --kubeconfig $kc config set contexts.default.user admin + kubectl --kubeconfig $kc config set contexts.default.namespace $(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + kubectl --kubeconfig $kc config use-context default + command: + - /bin/bash + name: rewrite-config + resources: + requests: + cpu: 10m + memory: 50Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - mountPath: /configs + name: configs + serviceAccountName: cluster-network-operator + volumes: + - name: hosted-etc-kube + secret: + secretName: service-network-admin-kubeconfig + - emptyDir: {} + 
name: configs + - name: konnectivity-proxy-cert + secret: + defaultMode: 416 + secretName: konnectivity-client + - configMap: + defaultMode: 416 + name: konnectivity-ca-bundle + name: konnectivity-proxy-ca +status: {} diff --git a/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_Preserve_existing_resources.yaml b/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_Preserve_existing_resources.yaml new file mode 100644 index 0000000000..3598ce36b0 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_Preserve_existing_resources.yaml @@ -0,0 +1,201 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/managed-by: control-plane-operator +spec: + replicas: 0 + revisionHistoryLimit: 0 + selector: + matchLabels: + name: cluster-network-operator + strategy: + type: Recreate + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: configs + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + creationTimestamp: null + labels: + app: cluster-network-operator + hypershift.openshift.io/control-plane-component: cluster-network-operator + name: cluster-network-operator + spec: + containers: + - args: + - start + - --listen=0.0.0.0:9104 + - --kubeconfig=/etc/hosted-kubernetes/kubeconfig + - --namespace=openshift-network-operator + - --extra-clusters=management=/configs/management + command: + - /usr/bin/cluster-network-operator + env: + - name: OVN_SBDB_ROUTE_HOST + - name: PROXY_INTERNAL_APISERVER_ADDRESS + value: "true" + - name: HYPERSHIFT + value: "true" + - name: HOSTED_CLUSTER_NAME + - name: TOKEN_AUDIENCE + - name: RELEASE_VERSION + value: 4.11.0 + - name: APISERVER_OVERRIDE_HOST + - name: APISERVER_OVERRIDE_PORT + value: "0" + - name: OVN_NB_RAFT_ELECTION_TIMER + 
value: "10" + - name: OVN_SB_RAFT_ELECTION_TIMER + value: "16" + - name: OVN_NORTHD_PROBE_INTERVAL + value: "5000" + - name: OVN_CONTROLLER_INACTIVITY_PROBE + value: "180000" + - name: OVN_NB_INACTIVITY_PROBE + value: "60000" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HOSTED_CLUSTER_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDN_IMAGE + - name: KUBE_PROXY_IMAGE + - name: KUBE_RBAC_PROXY_IMAGE + - name: MULTUS_IMAGE + - name: MULTUS_ADMISSION_CONTROLLER_IMAGE + - name: CNI_PLUGINS_IMAGE + - name: BOND_CNI_PLUGIN_IMAGE + - name: WHEREABOUTS_CNI_IMAGE + - name: ROUTE_OVERRRIDE_CNI_IMAGE + - name: MULTUS_NETWORKPOLICY_IMAGE + - name: OVN_IMAGE + - name: OVN_CONTROL_PLANE_IMAGE + - name: EGRESS_ROUTER_CNI_IMAGE + - name: KURYR_DAEMON_IMAGE + - name: KURYR_CONTROLLER_IMAGE + - name: NETWORK_METRICS_DAEMON_IMAGE + - name: NETWORK_CHECK_SOURCE_IMAGE + - name: NETWORK_CHECK_TARGET_IMAGE + - name: CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE + - name: TOKEN_MINTER_IMAGE + - name: CLI_IMAGE + - name: SOCKS5_PROXY_IMAGE + - name: OPENSHIFT_RELEASE_IMAGE + imagePullPolicy: IfNotPresent + name: cluster-network-operator + resources: + limits: + cpu: "1" + memory: 1000Mi + requests: + cpu: 500m + memory: 500Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - mountPath: /configs + name: configs + - args: + - run + command: + - /usr/bin/control-plane-operator + - konnectivity-socks5-proxy + - --disable-resolver + env: + - name: KUBECONFIG + value: /etc/kubernetes/kubeconfig + name: konnectivity-proxy + resources: + requests: + cpu: 10m + memory: 10Mi + volumeMounts: + - mountPath: /etc/kubernetes + name: hosted-etc-kube + - mountPath: /etc/konnectivity/proxy-client + name: konnectivity-proxy-cert + - mountPath: /etc/konnectivity/proxy-ca + name: konnectivity-proxy-ca + initContainers: + - command: + - /usr/bin/control-plane-operator + - 
availability-prober + - --target + - https://kube-apiserver:6443/readyz + - --kubeconfig=/var/kubeconfig/kubeconfig + - --required-api=operator.openshift.io,v1,Network + - --required-api=network.operator.openshift.io,v1,EgressRouter + - --required-api=network.operator.openshift.io,v1,OperatorPKI + - --wait-for-infrastructure-resource + imagePullPolicy: IfNotPresent + name: availability-prober + resources: {} + volumeMounts: + - mountPath: /var/kubeconfig + name: hosted-etc-kube + - args: + - --kubeconfig=/etc/hosted-kubernetes/kubeconfig + - -n=openshift-network-operator + - delete + - --ignore-not-found=true + - deployment + - network-operator + command: + - /usr/bin/kubectl + name: remove-old-cno + resources: + requests: + cpu: 10m + memory: 50Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - args: + - -c + - |2- + + set -xeuo pipefail + kc=/configs/management + kubectl --kubeconfig $kc config set clusters.default.server "https://[${KUBERNETES_SERVICE_HOST}]:${KUBERNETES_SERVICE_PORT}" + kubectl --kubeconfig $kc config set clusters.default.certificate-authority /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + kubectl --kubeconfig $kc config set users.admin.tokenFile /var/run/secrets/kubernetes.io/serviceaccount/token + kubectl --kubeconfig $kc config set contexts.default.cluster default + kubectl --kubeconfig $kc config set contexts.default.user admin + kubectl --kubeconfig $kc config set contexts.default.namespace $(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + kubectl --kubeconfig $kc config use-context default + command: + - /bin/bash + name: rewrite-config + resources: + requests: + cpu: 10m + memory: 50Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - mountPath: /configs + name: configs + serviceAccountName: cluster-network-operator + volumes: + - name: hosted-etc-kube 
+ secret: + secretName: service-network-admin-kubeconfig + - emptyDir: {} + name: configs + - name: konnectivity-proxy-cert + secret: + defaultMode: 416 + secretName: konnectivity-client + - configMap: + defaultMode: 416 + name: konnectivity-ca-bundle + name: konnectivity-proxy-ca +status: {} diff --git a/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_Private_apiserver_connectivity__proxy_apiserver_address_is_unset.yaml b/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_Private_apiserver_connectivity__proxy_apiserver_address_is_unset.yaml new file mode 100644 index 0000000000..50be0eb741 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/cno/testdata/zz_fixture_TestReconcileDeployment_Private_apiserver_connectivity__proxy_apiserver_address_is_unset.yaml @@ -0,0 +1,199 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/managed-by: control-plane-operator +spec: + replicas: 0 + revisionHistoryLimit: 0 + selector: + matchLabels: + name: cluster-network-operator + strategy: + type: Recreate + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: configs + target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}' + creationTimestamp: null + labels: + app: cluster-network-operator + hypershift.openshift.io/control-plane-component: cluster-network-operator + name: cluster-network-operator + spec: + containers: + - args: + - start + - --listen=0.0.0.0:9104 + - --kubeconfig=/etc/hosted-kubernetes/kubeconfig + - --namespace=openshift-network-operator + - --extra-clusters=management=/configs/management + command: + - /usr/bin/cluster-network-operator + env: + - name: OVN_SBDB_ROUTE_HOST + value: ovnkube-sbdb.apps..hypershift.local + - name: OVN_SBDB_ROUTE_LABELS + value: 
hypershift.openshift.io/hosted-control-plane=,hypershift.openshift.io/internal-route=true + - name: HYPERSHIFT + value: "true" + - name: HOSTED_CLUSTER_NAME + - name: TOKEN_AUDIENCE + - name: RELEASE_VERSION + value: 4.11.0 + - name: APISERVER_OVERRIDE_HOST + - name: APISERVER_OVERRIDE_PORT + value: "0" + - name: OVN_NB_RAFT_ELECTION_TIMER + value: "10" + - name: OVN_SB_RAFT_ELECTION_TIMER + value: "16" + - name: OVN_NORTHD_PROBE_INTERVAL + value: "5000" + - name: OVN_CONTROLLER_INACTIVITY_PROBE + value: "180000" + - name: OVN_NB_INACTIVITY_PROBE + value: "60000" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: HOSTED_CLUSTER_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: SDN_IMAGE + - name: KUBE_PROXY_IMAGE + - name: KUBE_RBAC_PROXY_IMAGE + - name: MULTUS_IMAGE + - name: MULTUS_ADMISSION_CONTROLLER_IMAGE + - name: CNI_PLUGINS_IMAGE + - name: BOND_CNI_PLUGIN_IMAGE + - name: WHEREABOUTS_CNI_IMAGE + - name: ROUTE_OVERRRIDE_CNI_IMAGE + - name: MULTUS_NETWORKPOLICY_IMAGE + - name: OVN_IMAGE + - name: OVN_CONTROL_PLANE_IMAGE + - name: EGRESS_ROUTER_CNI_IMAGE + - name: KURYR_DAEMON_IMAGE + - name: KURYR_CONTROLLER_IMAGE + - name: NETWORK_METRICS_DAEMON_IMAGE + - name: NETWORK_CHECK_SOURCE_IMAGE + - name: NETWORK_CHECK_TARGET_IMAGE + - name: CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE + - name: TOKEN_MINTER_IMAGE + - name: CLI_IMAGE + - name: SOCKS5_PROXY_IMAGE + - name: OPENSHIFT_RELEASE_IMAGE + imagePullPolicy: IfNotPresent + name: cluster-network-operator + resources: + requests: + cpu: 10m + memory: 100Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - mountPath: /configs + name: configs + - args: + - run + command: + - /usr/bin/control-plane-operator + - konnectivity-socks5-proxy + - --disable-resolver + env: + - name: KUBECONFIG + value: /etc/kubernetes/kubeconfig + name: konnectivity-proxy + resources: + requests: + cpu: 10m + 
memory: 10Mi + volumeMounts: + - mountPath: /etc/kubernetes + name: hosted-etc-kube + - mountPath: /etc/konnectivity/proxy-client + name: konnectivity-proxy-cert + - mountPath: /etc/konnectivity/proxy-ca + name: konnectivity-proxy-ca + initContainers: + - command: + - /usr/bin/control-plane-operator + - availability-prober + - --target + - https://kube-apiserver:6443/readyz + - --kubeconfig=/var/kubeconfig/kubeconfig + - --required-api=operator.openshift.io,v1,Network + - --required-api=network.operator.openshift.io,v1,EgressRouter + - --required-api=network.operator.openshift.io,v1,OperatorPKI + - --wait-for-infrastructure-resource + imagePullPolicy: IfNotPresent + name: availability-prober + resources: {} + volumeMounts: + - mountPath: /var/kubeconfig + name: hosted-etc-kube + - args: + - --kubeconfig=/etc/hosted-kubernetes/kubeconfig + - -n=openshift-network-operator + - delete + - --ignore-not-found=true + - deployment + - network-operator + command: + - /usr/bin/kubectl + name: remove-old-cno + resources: + requests: + cpu: 10m + memory: 50Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - args: + - -c + - |2- + + set -xeuo pipefail + kc=/configs/management + kubectl --kubeconfig $kc config set clusters.default.server "https://[${KUBERNETES_SERVICE_HOST}]:${KUBERNETES_SERVICE_PORT}" + kubectl --kubeconfig $kc config set clusters.default.certificate-authority /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + kubectl --kubeconfig $kc config set users.admin.tokenFile /var/run/secrets/kubernetes.io/serviceaccount/token + kubectl --kubeconfig $kc config set contexts.default.cluster default + kubectl --kubeconfig $kc config set contexts.default.user admin + kubectl --kubeconfig $kc config set contexts.default.namespace $(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace) + kubectl --kubeconfig $kc config use-context default + command: + - /bin/bash + name: 
rewrite-config + resources: + requests: + cpu: 10m + memory: 50Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /etc/hosted-kubernetes + name: hosted-etc-kube + - mountPath: /configs + name: configs + serviceAccountName: cluster-network-operator + volumes: + - name: hosted-etc-kube + secret: + secretName: service-network-admin-kubeconfig + - emptyDir: {} + name: configs + - name: konnectivity-proxy-cert + secret: + defaultMode: 416 + secretName: konnectivity-client + - configMap: + defaultMode: 416 + name: konnectivity-ca-bundle + name: konnectivity-proxy-ca +status: {} diff --git a/control-plane-operator/controllers/hostedcontrolplane/configoperator/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/configoperator/reconcile.go index b5d49e9f4f..97f4dd346b 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/configoperator/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/configoperator/reconcile.go @@ -22,6 +22,10 @@ import ( capiv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) +const ( + hostedClusterConfigOperatorName = "hosted-cluster-config-operator" +) + func ReconcileServiceAccount(sa *corev1.ServiceAccount, ownerRef config.OwnerRef) error { ownerRef.ApplyTo(sa) util.EnsurePullSecret(sa, common.PullSecret("").Name) @@ -224,8 +228,8 @@ var ( }, } hccLabels = map[string]string{ - "app": "hosted-cluster-config-operator", - hyperv1.ControlPlaneComponent: "hosted-cluster-config-operator", + "app": hostedClusterConfigOperatorName, + hyperv1.ControlPlaneComponent: hostedClusterConfigOperatorName, } ) @@ -247,6 +251,13 @@ func ReconcileDeployment(deployment *appsv1.Deployment, image, hcpName, openShif } ownerRef.ApplyTo(deployment) + + // preserve existing resource requirements for main scheduler container + mainContainer := util.FindContainer(hostedClusterConfigOperatorName, deployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + 
deploymentConfig.SetContainerResourcesIfPresent(mainContainer) + } + deployment.Spec = appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: selectorLabels, @@ -281,13 +292,13 @@ func ReconcileDeployment(deployment *appsv1.Deployment, image, hcpName, openShif if openShiftTrustedCABundleConfigMapForCPOExists { util.DeploymentAddOpenShiftTrustedCABundleConfigMap(deployment) } - if isExternalInfraKv(hcp) { + if IsExternalInfraKv(hcp) { // injects the kubevirt credentials secret volume, volume mount path, and appends cli arg. util.DeploymentAddKubevirtInfraCredentials(deployment) } deploymentConfig.ApplyTo(deployment) - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { + util.AvailabilityProber(kas.InClusterKASReadyURL(hcp.Spec.Platform.Type), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { o.KubeconfigVolumeName = "kubeconfig" o.RequiredAPIs = []schema.GroupVersionKind{ {Group: "imageregistry.operator.openshift.io", Version: "v1", Kind: "Config"}, @@ -313,7 +324,7 @@ func ReconcileDeployment(deployment *appsv1.Deployment, image, hcpName, openShif func hccContainerMain() *corev1.Container { return &corev1.Container{ - Name: "hosted-cluster-config-operator", + Name: hostedClusterConfigOperatorName, } } @@ -341,7 +352,7 @@ func buildHCCContainerMain(image, hcpName, openShiftVersion, kubeVersion string, c.ImagePullPolicy = corev1.PullIfNotPresent c.Command = []string{ "/usr/bin/control-plane-operator", - "hosted-cluster-config-operator", + hostedClusterConfigOperatorName, fmt.Sprintf("--initial-ca-file=%s", path.Join(volumeMounts.Path(c.Name, hccVolumeRootCA().Name), certs.CASignerCertMapKey)), fmt.Sprintf("--cluster-signer-ca-file=%s", path.Join(volumeMounts.Path(c.Name, hccVolumeClusterSignerCA().Name), certs.CASignerCertMapKey)), fmt.Sprintf("--target-kubeconfig=%s", path.Join(volumeMounts.Path(c.Name, 
hccVolumeKubeconfig().Name), kas.KubeconfigKey)), @@ -410,7 +421,7 @@ func buildHCCClusterSignerCA(v *corev1.Volume) { } } -func isExternalInfraKv(hcp *hyperv1.HostedControlPlane) bool { +func IsExternalInfraKv(hcp *hyperv1.HostedControlPlane) bool { if hcp.Spec.Platform.Kubevirt != nil && hcp.Spec.Platform.Kubevirt.Credentials != nil && hcp.Spec.Platform.Kubevirt.Credentials.InfraKubeConfigSecret != nil && diff --git a/control-plane-operator/controllers/hostedcontrolplane/csi/kubevirt/kubevirt.go b/control-plane-operator/controllers/hostedcontrolplane/csi/kubevirt/kubevirt.go index 773b12e961..0d95703918 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/csi/kubevirt/kubevirt.go +++ b/control-plane-operator/controllers/hostedcontrolplane/csi/kubevirt/kubevirt.go @@ -10,6 +10,7 @@ import ( hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/configoperator" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/imageprovider" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/pki" @@ -161,16 +162,21 @@ func reconcileInfraConfigMap(cm *corev1.ConfigMap, hcp *hyperv1.HostedControlPla default: storageClassEnforcement = "allowDefault: true\nallowAll: false\n" } - + var infraClusterNamespace string + if configoperator.IsExternalInfraKv(hcp) { + infraClusterNamespace = hcp.Spec.Platform.Kubevirt.Credentials.InfraNamespace + } else { + infraClusterNamespace = cm.Namespace + } cm.Data = map[string]string{ - "infraClusterNamespace": cm.Namespace, + "infraClusterNamespace": infraClusterNamespace, "infraClusterLabels": fmt.Sprintf("%s=%s", hyperv1.InfraIDLabel, hcp.Spec.InfraID), 
"infraStorageClassEnforcement": storageClassEnforcement, } return nil } -func reconcileController(controller *appsv1.Deployment, componentImages map[string]string, deploymentConfig *config.DeploymentConfig) error { +func reconcileController(controller *appsv1.Deployment, componentImages map[string]string, deploymentConfig *config.DeploymentConfig, hcp *hyperv1.HostedControlPlane) error { controller.Spec = *controllerDeployment.Spec.DeepCopy() csiDriverImage, exists := componentImages["kubevirt-csi-driver"] @@ -210,6 +216,41 @@ func reconcileController(controller *appsv1.Deployment, componentImages map[stri } } + if configoperator.IsExternalInfraKv(hcp) { + csiDriverContainerIndex := func() int { + for i, container := range controller.Spec.Template.Spec.Containers { + if container.Name == "csi-driver" { + return i + } + } + return -1 + } + containerIndex := csiDriverContainerIndex() + if containerIndex == -1 { + return fmt.Errorf("unable to find csi-driver container in %s pod", controllerDeployment.Name) + } + csiDriverContainer := controller.Spec.Template.Spec.Containers[containerIndex] + const infraClusterKubeconfigMount = "/var/run/secrets/infracluster" + csiDriverContainer.Args = append(csiDriverContainer.Args, fmt.Sprintf("--infra-cluster-kubeconfig=%s/kubeconfig", infraClusterKubeconfigMount)) + + externalKubeconfigVolumeMount := corev1.VolumeMount{ + Name: "infracluster", + MountPath: infraClusterKubeconfigMount, + } + csiDriverContainer.VolumeMounts = append(csiDriverContainer.VolumeMounts, externalKubeconfigVolumeMount) + controller.Spec.Template.Spec.Containers[containerIndex] = csiDriverContainer + + infraClusterVolume := corev1.Volume{ + Name: "infracluster", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: hyperv1.KubeVirtInfraCredentialsSecretName, + }, + }, + } + controller.Spec.Template.Spec.Volumes = append(controller.Spec.Template.Spec.Volumes, infraClusterVolume) + } + deploymentConfig.ApplyTo(controller) 
return nil @@ -539,7 +580,7 @@ func ReconcileInfra(client crclient.Client, hcp *hyperv1.HostedControlPlane, ctx controller := manifests.KubevirtCSIDriverController(infraNamespace) _, err = createOrUpdate(ctx, client, controller, func() error { - return reconcileController(controller, releaseImageProvider.ComponentImages(), deploymentConfig) + return reconcileController(controller, releaseImageProvider.ComponentImages(), deploymentConfig, hcp) }) if err != nil { return err diff --git a/control-plane-operator/controllers/hostedcontrolplane/cvo/params.go b/control-plane-operator/controllers/hostedcontrolplane/cvo/params.go index 6fe881ea8f..67947c3eb2 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/cvo/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/cvo/params.go @@ -28,11 +28,17 @@ func NewCVOParams(hcp *hyperv1.HostedControlPlane, releaseImageProvider *imagepr CLIImage: releaseImageProvider.GetImage("cli"), AvailabilityProberImage: releaseImageProvider.GetImage(util.AvailabilityProberImageName), ControlPlaneImage: util.HCPControlPlaneReleaseImage(hcp), - Image: hcp.Spec.ReleaseImage, + Image: releaseImageProvider.GetImage("cluster-version-operator"), OwnerRef: config.OwnerRefFrom(hcp), ClusterID: hcp.Spec.ClusterID, PlatformType: hcp.Spec.Platform.Type, } + // fallback to hcp.Spec.ReleaseImage if "cluster-version-operator" image is not available. + // This could happen for example in local dev enviroments if the "OPERATE_ON_RELEASE_IMAGE" env variable is not set. 
+ if p.Image == "" { + p.Image = hcp.Spec.ReleaseImage + } + if enableCVOManagementClusterMetricsAccess { p.DeploymentConfig.AdditionalLabels = map[string]string{ config.NeedMetricsServerAccessLabel: "true", diff --git a/control-plane-operator/controllers/hostedcontrolplane/cvo/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/cvo/reconcile.go index 497689103f..565cb0033d 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/cvo/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/cvo/reconcile.go @@ -95,6 +95,10 @@ var ( // TODO: Remove when cluster-csi-snapshot-controller-operator stops shipping // its ibm-cloud-managed deployment. "0000_50_cluster-csi-snapshot-controller-operator_07_deployment-ibm-cloud-managed.yaml", + // Omited this file in order to allow the HCCO to create the resource. This allow us to reconcile and sync + // the HCP.Configuration.operatorhub with OperatorHub object in the HostedCluster. This will only occur once. + // From that point the HCCO will use the OperatorHub object in the HostedCluster as a source of truth. 
+ "0000_03_marketplace-operator_02_operatorhub.cr.yaml", } ) @@ -154,7 +158,7 @@ func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef } deploymentConfig.ApplyTo(deployment) util.AvailabilityProber( - kas.InClusterKASReadyURL(), + kas.InClusterKASReadyURL(platformType), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { diff --git a/control-plane-operator/controllers/hostedcontrolplane/dnsoperator/dnsoperator.go b/control-plane-operator/controllers/hostedcontrolplane/dnsoperator/dnsoperator.go index c800a958fa..36667b2d9d 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/dnsoperator/dnsoperator.go +++ b/control-plane-operator/controllers/hostedcontrolplane/dnsoperator/dnsoperator.go @@ -104,7 +104,7 @@ func NewParams(hcp *hyperv1.HostedControlPlane, version string, releaseImageProv // hosted-cluster services, or external services, so the operator does not // require any special proxy configuration or permissions in the management // cluster. 
-func ReconcileDeployment(dep *appsv1.Deployment, params Params) { +func ReconcileDeployment(dep *appsv1.Deployment, params Params, platformType hyperv1.PlatformType) { dep.Spec.Selector = &metav1.LabelSelector{ MatchLabels: map[string]string{"name": "dns-operator"}, } @@ -161,7 +161,7 @@ func ReconcileDeployment(dep *appsv1.Deployment, params Params) { }, }} util.AvailabilityProber( - kas.InClusterKASReadyURL(), + kas.InClusterKASReadyURL(platformType), params.AvailabilityProberImage, &dep.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { diff --git a/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile.go index 49c1d6dbbc..c09509ed84 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/etcd/reconcile.go @@ -6,7 +6,6 @@ import ( "strconv" "strings" - hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/pki" "github.com/openshift/hypershift/support/certs" @@ -16,6 +15,7 @@ import ( prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -60,6 +60,12 @@ func etcdHealthzContainer() *corev1.Container { } } +func etcdDefragControllerContainer() *corev1.Container { + return &corev1.Container{ + Name: "etcd-defrag", + } +} + //go:embed etcd-init.sh var etcdInitScript string @@ -96,6 +102,21 @@ func ReconcileStatefulSet(ss *appsv1.StatefulSet, p *EtcdParams) error { util.BuildContainer(etcdHealthzContainer(), buildEtcdHealthzContainer(p, ss.Namespace)), } + // Only deploy etcd-defrag-controller 
container in HA mode. + // When we perform defragmentation it takes the etcd instance offline for a short amount of time. + // Therefore we only want to do this when there are multiple etcd instances. + if p.DeploymentConfig.Replicas > 1 { + ss.Spec.Template.Spec.Containers = append(ss.Spec.Template.Spec.Containers, + util.BuildContainer(etcdDefragControllerContainer(), buildEtcdDefragControllerContainer(p, ss.Namespace))) + + ss.Spec.Template.Spec.ServiceAccountName = manifests.EtcdDefragControllerServiceAccount("").Name + + if p.DeploymentConfig.AdditionalLabels == nil { + p.DeploymentConfig.AdditionalLabels = make(map[string]string) + } + p.DeploymentConfig.AdditionalLabels[config.NeedManagementKASAccessLabel] = "true" + } + ss.Spec.Template.Spec.InitContainers = []corev1.Container{ util.BuildContainer(ensureDNSContainer(), buildEnsureDNSContainer(p, ss.Namespace)), util.BuildContainer(resetMemberContainer(), buildResetMemberContainer(p, ss.Namespace)), @@ -486,6 +507,35 @@ func buildEtcdHealthzContainer(p *EtcdParams, namespace string) func(c *corev1.C } } +func buildEtcdDefragControllerContainer(p *EtcdParams, namespace string) func(c *corev1.Container) { + return func(c *corev1.Container) { + c.Image = p.CPOImage + c.ImagePullPolicy = corev1.PullIfNotPresent + c.Command = []string{"control-plane-operator"} + c.Args = []string{ + "etcd-defrag-controller", + "--namespace", + namespace, + } + c.VolumeMounts = []corev1.VolumeMount{ + { + Name: "client-tls", + MountPath: "/etc/etcd/tls/client", + }, + { + Name: "etcd-ca", + MountPath: "/etc/etcd/tls/etcd-ca", + }, + } + c.Resources = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + } + } +} + func buildEtcdMetricsContainer(p *EtcdParams, namespace string) func(c *corev1.Container) { return func(c *corev1.Container) { var loInterface, allInterfaces string @@ -658,16 +708,47 @@ func 
ReconcilePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget, p *EtcdPara } p.OwnerRef.ApplyTo(pdb) + util.ReconcilePodDisruptionBudget(pdb, p.Availability) + return nil +} + +func ReconcileDefragControllerRole(role *rbacv1.Role, p *EtcdParams) error { + p.OwnerRef.ApplyTo(role) - var minAvailable int - switch p.Availability { - case hyperv1.SingleReplica: - minAvailable = 0 - case hyperv1.HighlyAvailable: - // For HA clusters, only tolerate disruption of a minority of members - minAvailable = p.DeploymentConfig.Replicas/2 + 1 + role.Rules = []rbacv1.PolicyRule{ + { + APIGroups: []string{"coordination.k8s.io"}, + Resources: []string{ + "leases", + }, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"events"}, + Verbs: []string{ + "create", + "patch", + "update", + }, + }, } - pdb.Spec.MinAvailable = &intstr.IntOrString{Type: intstr.Int, IntVal: int32(minAvailable)} + return nil +} + +func ReconcileDefragControllerRoleBinding(roleBinding *rbacv1.RoleBinding, p *EtcdParams) error { + p.OwnerRef.ApplyTo(roleBinding) + roleBinding.RoleRef = rbacv1.RoleRef{ + APIGroup: rbacv1.SchemeGroupVersion.Group, + Kind: "Role", + Name: manifests.EtcdDefragControllerRole("").Name, + } + roleBinding.Subjects = []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: manifests.EtcdDefragControllerServiceAccount("").Name, + }, + } return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller.go b/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller.go index 375338a656..fabfc8c0d7 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller.go +++ b/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller.go @@ -16,6 +16,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" + 
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cco" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/pkioperator" pkimanifests "github.com/openshift/hypershift/control-plane-pki-operator/manifests" "k8s.io/apimachinery/pkg/runtime" @@ -71,6 +72,7 @@ import ( "github.com/openshift/hypershift/support/events" "github.com/openshift/hypershift/support/globalconfig" "github.com/openshift/hypershift/support/metrics" + "github.com/openshift/hypershift/support/proxy" "github.com/openshift/hypershift/support/releaseinfo" "github.com/openshift/hypershift/support/upsert" "github.com/openshift/hypershift/support/util" @@ -162,6 +164,7 @@ type HostedControlPlaneReconciler struct { Log logr.Logger ReleaseProvider releaseinfo.ProviderWithOpenShiftImageRegistryOverrides + UserReleaseProvider releaseinfo.Provider createOrUpdate func(hcp *hyperv1.HostedControlPlane) upsert.CreateOrUpdateFN EnableCIDebugOutput bool OperateOnReleaseImage string @@ -306,15 +309,42 @@ func (r *HostedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.R // Return early if deleted if !hostedControlPlane.DeletionTimestamp.IsZero() { - + condition := &metav1.Condition{ + Type: string(hyperv1.AWSDefaultSecurityGroupDeleted), + } if shouldCleanupCloudResources(r.Log, hostedControlPlane) { - if err := r.destroyAWSDefaultSecurityGroup(ctx, hostedControlPlane); err != nil { - if awsErrorCode(err) == "UnauthorizedOperation" { - r.Log.Info("Skipping AWS default security group deletion because the operator is not authorized to delete it.") + if code, destroyErr := r.destroyAWSDefaultSecurityGroup(ctx, hostedControlPlane); destroyErr != nil { + condition.Message = "failed to delete AWS default security group" + if code == "DependencyViolation" { + condition.Message = destroyErr.Error() + } + condition.Reason = hyperv1.AWSErrorReason + condition.Status = metav1.ConditionFalse + 
meta.SetStatusCondition(&hostedControlPlane.Status.Conditions, *condition) + + if err := r.Client.Status().Patch(ctx, hostedControlPlane, client.MergeFromWithOptions(originalHostedControlPlane, client.MergeFromWithOptimisticLock{})); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update status on hcp for security group deletion: %w. Condition error message: %v", err, condition.Message) + } + + if code == "UnauthorizedOperation" { + r.Log.Error(destroyErr, "Skipping AWS default security group deletion because of unauthorized operation.") + } + if code == "DependencyViolation" { + r.Log.Error(destroyErr, "Skipping AWS default security group deletion because of dependency violation.") } else { - return ctrl.Result{}, fmt.Errorf("failed to delete AWS default security group: %w", err) + return ctrl.Result{}, fmt.Errorf("failed to delete AWS default security group: %w", destroyErr) + } + } else { + condition.Message = hyperv1.AllIsWellMessage + condition.Reason = hyperv1.AsExpectedReason + condition.Status = metav1.ConditionTrue + meta.SetStatusCondition(&hostedControlPlane.Status.Conditions, *condition) + + if err := r.Client.Status().Patch(ctx, hostedControlPlane, client.MergeFromWithOptions(originalHostedControlPlane, client.MergeFromWithOptimisticLock{})); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update status on hcp for security group deletion: %w. Condition message: %v", err, condition.Message) } } + done, err := r.removeCloudResources(ctx, hostedControlPlane) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to ensure cloud resources are removed: %w", err) @@ -758,6 +788,9 @@ func (r *HostedControlPlaneReconciler) healthCheckKASLoadBalancers(ctx context.C case !util.IsPublicHCP(hcp): // When the cluster is private, checking the load balancers will depend on whether the load balancer is // using the right subnets. To avoid uncertainty, we'll limit the check to the service endpoint. 
+ if hcp.Spec.Platform.Type == hyperv1.IBMCloudPlatform { + return healthCheckKASEndpoint(manifests.KubeAPIServerService("").Name, config.KASSVCIBMCloudPort) + } return healthCheckKASEndpoint(manifests.KubeAPIServerService("").Name, config.KASSVCPort) case serviceStrategy.Type == hyperv1.Route: externalRoute := manifests.KubeAPIServerExternalPublicRoute(hcp.Namespace) @@ -772,8 +805,12 @@ func (r *HostedControlPlaneReconciler) healthCheckKASLoadBalancers(ctx context.C case serviceStrategy.Type == hyperv1.LoadBalancer: svc := manifests.KubeAPIServerService(hcp.Namespace) port := config.KASSVCPort - if hcp.Spec.Platform.Type == hyperv1.AzurePlatform { - // If Azure we get the SVC handling the LB. + if hcp.Spec.Platform.Type == hyperv1.IBMCloudPlatform { + port = config.KASSVCIBMCloudPort + } + if hcp.Spec.Platform.Type == hyperv1.AzurePlatform || + hcp.Annotations[hyperv1.ManagementPlatformAnnotation] == string(hyperv1.AzurePlatform) { + // If Azure or Kubevirt on Azure we get the SVC handling the LB. // TODO(alberto): remove this hack when having proper traffic management for Azure. 
svc = manifests.KubeAPIServerServiceAzureLB(hcp.Namespace) port = config.KASSVCLBAzurePort @@ -837,14 +874,6 @@ func (r *HostedControlPlaneReconciler) update(ctx context.Context, hostedControl createOrUpdate := r.createOrUpdate(hostedControlPlane) - if util.IsPrivateHCP(hostedControlPlane) { - r.Log.Info("Removing private IngressController") - // Ensure that if an ingress controller exists from a previous version, it is removed - if err := r.reconcilePrivateIngressController(ctx, hostedControlPlane); err != nil { - return reconcile.Result{}, fmt.Errorf("failed to reconcile private ingresscontroller: %w", err) - } - } - r.Log.Info("Reconciling infrastructure services") if err := r.reconcileInfrastructure(ctx, hostedControlPlane, createOrUpdate); err != nil { return reconcile.Result{}, fmt.Errorf("failed to ensure infrastructure: %w", err) @@ -889,7 +918,8 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont if err := r.Client.Get(ctx, client.ObjectKeyFromObject(pullSecret), pullSecret); err != nil { return err } - userReleaseImage, err := r.ReleaseProvider.Lookup(ctx, hostedControlPlane.Spec.ReleaseImage, pullSecret.Data[corev1.DockerConfigJsonKey]) + // UserReleaseProvider doesn't include registry overrides as they should not get propagated to the data plane. 
+ userReleaseImage, err := r.UserReleaseProvider.Lookup(ctx, hostedControlPlane.Spec.ReleaseImage, pullSecret.Data[corev1.DockerConfigJsonKey]) if err != nil { return fmt.Errorf("failed to get lookup release image: %w", err) } @@ -901,12 +931,22 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont return fmt.Errorf("failed to reconcile default service account: %w", err) } + openShiftTrustedCABundleConfigMapForCPOExists, err := doesOpenShiftTrustedCABundleConfigMapForCPOExist(ctx, r.Client, hostedControlPlane.Namespace) + if err != nil { + return err + } + // Reconcile PKI if _, exists := hostedControlPlane.Annotations[hyperv1.DisablePKIReconciliationAnnotation]; !exists { r.Log.Info("Reconciling PKI") if err := r.reconcilePKI(ctx, hostedControlPlane, infraStatus, createOrUpdate); err != nil { return fmt.Errorf("failed to reconcile PKI: %w", err) } + + r.Log.Info("Reconciling Control Plane PKI Operator") + if err := r.reconcileControlPlanePKIOperator(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate, openShiftTrustedCABundleConfigMapForCPOExists, r.CertRotationScale); err != nil { + return fmt.Errorf("failed to reconcile control plane pki operator: %w", err) + } } // Reconcile etcd @@ -961,7 +1001,7 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont r.Log.Info("Reconciling Kube Scheduler") schedulerDeployment := manifests.SchedulerDeployment(hostedControlPlane.Namespace) if err := r.reconcileKubeScheduler(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate, schedulerDeployment); err != nil { - return fmt.Errorf("failed to reconcile kube controller manager: %w", err) + return fmt.Errorf("failed to reconcile kube scheduler: %w", err) } r.Log.Info("Looking up observed configuration") @@ -1007,11 +1047,6 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont } } - openShiftTrustedCABundleConfigMapForCPOExists, err := 
doesOpenShiftTrustedCABundleConfigMapForCPOExist(ctx, r.Client, hostedControlPlane.Namespace) - if err != nil { - return err - } - r.Log.Info("Reconciling ignition server") if err := ignitionserver.ReconcileIgnitionServer(ctx, r.Client, @@ -1030,6 +1065,7 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont config.OwnerRefFrom(hostedControlPlane), openShiftTrustedCABundleConfigMapForCPOExists, r.ReleaseProvider.GetMirroredReleaseImage(), + useHCPRouter(hostedControlPlane), ); err != nil { return fmt.Errorf("failed to reconcile ignition server: %w", err) } @@ -1062,7 +1098,7 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont // Reconcile openshift route controller manager r.Log.Info("Reconciling OpenShift Route Controller Manager") - if err := r.reconcileOpenShiftRouteControllerManager(ctx, hostedControlPlane, observedConfig, releaseImageProvider, createOrUpdate); err != nil { + if err := r.reconcileOpenShiftRouteControllerManager(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate); err != nil { return fmt.Errorf("failed to reconcile openshift route controller manager: %w", err) } @@ -1079,7 +1115,7 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont } r.Log.Info("Reconciling ClusterNetworkOperator") - if err := r.reconcileClusterNetworkOperator(ctx, hostedControlPlane, releaseImageProvider, userReleaseImageProvider, createOrUpdate); err != nil { + if err := r.reconcileClusterNetworkOperator(ctx, hostedControlPlane, releaseImageProvider, userReleaseImageProvider, r.ManagementClusterCapabilities.Has(capabilities.CapabilityRoute), createOrUpdate); err != nil { return fmt.Errorf("failed to reconcile cluster network operator: %w", err) } @@ -1104,18 +1140,20 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont return fmt.Errorf("failed to reconcile hosted cluster config operator: %w", err) } - // Reconcile control plane pki operator - 
r.Log.Info("Reconciling Control Plane PKI Operator") - if err := r.reconcileControlPlanePKIOperator(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate, openShiftTrustedCABundleConfigMapForCPOExists, r.CertRotationScale); err != nil { - return fmt.Errorf("failed to reconcile control plane pki operator: %w", err) - } - // Reconcile cloud controller manager r.Log.Info("Reconciling Cloud Controller Manager") if err := r.reconcileCloudControllerManager(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate); err != nil { return fmt.Errorf("failed to reconcile cloud controller manager: %w", err) } + if hostedControlPlane.Spec.Platform.Type == hyperv1.AWSPlatform { + // Reconcile cloud credential operator + r.Log.Info("Reconciling Cloud Credential Operator") + if err := r.reconcileCloudCredentialOperator(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate); err != nil { + return fmt.Errorf("failed to reconcile cloud controller manager: %w", err) + } + } + // Reconcile OLM r.Log.Info("Reconciling OLM") if err := r.reconcileOperatorLifecycleManager(ctx, hostedControlPlane, releaseImageProvider, userReleaseImageProvider, createOrUpdate); err != nil { @@ -1171,14 +1209,17 @@ func (r *HostedControlPlaneReconciler) reconcile(ctx context.Context, hostedCont } } - r.Log.Info("Reconciling autoscaler") - if err := r.reconcileAutoscaler(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate); err != nil { - return fmt.Errorf("failed to reconcile autoscaler: %w", err) - } + // Disable machine management components if enabled + if _, exists := hostedControlPlane.Annotations[hyperv1.DisableMachineManagement]; !exists { + r.Log.Info("Reconciling autoscaler") + if err := r.reconcileAutoscaler(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate); err != nil { + return fmt.Errorf("failed to reconcile autoscaler: %w", err) + } - r.Log.Info("Reconciling machine approver") - if err := r.reconcileMachineApprover(ctx, hostedControlPlane, 
releaseImageProvider, createOrUpdate); err != nil { - return fmt.Errorf("failed to reconcile machine approver: %w", err) + r.Log.Info("Reconciling machine approver") + if err := r.reconcileMachineApprover(ctx, hostedControlPlane, releaseImageProvider, createOrUpdate); err != nil { + return fmt.Errorf("failed to reconcile machine approver: %w", err) + } } r.Log.Info("Reconciling default security group") @@ -1208,8 +1249,12 @@ func (r *HostedControlPlaneReconciler) reconcileAPIServerService(ctx context.Con p := kas.NewKubeAPIServerServiceParams(hcp) apiServerService := manifests.KubeAPIServerService(hcp.Namespace) kasSVCPort := config.KASSVCPort - if serviceStrategy.Type == hyperv1.LoadBalancer && hcp.Spec.Platform.Type == hyperv1.AzurePlatform { - // For Azure we currently hardcode 7443 for the SVC LB as 6443 collides with public LB rule for the management cluster. + if hcp.Spec.Platform.Type == hyperv1.IBMCloudPlatform { + kasSVCPort = config.KASSVCIBMCloudPort + } + if serviceStrategy.Type == hyperv1.LoadBalancer && (hcp.Spec.Platform.Type == hyperv1.AzurePlatform || + hcp.Annotations[hyperv1.ManagementPlatformAnnotation] == string(hyperv1.AzurePlatform)) { + // For Azure or Kubevirt on Azure we currently hardcode 7443 for the SVC LB as 6443 collides with public LB rule for the management cluster. // https://bugzilla.redhat.com/show_bug.cgi?id=2060650 // TODO(alberto): explore exposing multiple Azure frontend IPs on the load balancer. 
kasSVCPort = config.KASSVCLBAzurePort @@ -1221,7 +1266,8 @@ func (r *HostedControlPlaneReconciler) reconcileAPIServerService(ctx context.Con return fmt.Errorf("failed to reconcile API server service: %w", err) } - if serviceStrategy.Type == hyperv1.LoadBalancer && hcp.Spec.Platform.Type == hyperv1.AzurePlatform { + if serviceStrategy.Type == hyperv1.LoadBalancer && (hcp.Spec.Platform.Type == hyperv1.AzurePlatform || + hcp.Spec.Platform.Type == hyperv1.KubevirtPlatform && hcp.Annotations[hyperv1.ManagementPlatformAnnotation] == string(hyperv1.AzurePlatform)) { // Create the svc clusterIP for Azure on config.KASSVCPort as expected by internal consumers. kasSVC := manifests.KubeAPIServerService(hcp.Namespace) if _, err := createOrUpdate(ctx, r.Client, kasSVC, func() error { @@ -1328,7 +1374,7 @@ func (r *HostedControlPlaneReconciler) reconcileKonnectivityServerService(ctx co if serviceStrategy.Route != nil { hostname = serviceStrategy.Route.Hostname } - return kas.ReconcileKonnectivityExternalRoute(konnectivityRoute, p.OwnerRef, hostname, r.DefaultIngressDomain) + return kas.ReconcileKonnectivityExternalRoute(konnectivityRoute, p.OwnerRef, hostname, r.DefaultIngressDomain, useHCPRouter(hcp)) }); err != nil { return fmt.Errorf("failed to reconcile Konnectivity server external route: %w", err) } @@ -1355,47 +1401,41 @@ func (r *HostedControlPlaneReconciler) reconcileOAuthServerService(ctx context.C oauthExternalPrivateRoute := manifests.OauthServerExternalPrivateRoute(hcp.Namespace) if util.IsPublicHCP(hcp) { // Remove the external private route if it exists - err := r.Get(ctx, client.ObjectKeyFromObject(oauthExternalPrivateRoute), oauthExternalPrivateRoute) + _, err := util.DeleteIfNeeded(ctx, r.Client, oauthExternalPrivateRoute) if err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to check whether OAuth external private route exists: %w", err) - } - } else { - if err := r.Delete(ctx, oauthExternalPrivateRoute); err != nil { - return 
fmt.Errorf("failed to delete OAuth external private route: %w", err) - } + return fmt.Errorf("failed to delete OAuth external private route: %w", err) } + // Reconcile the external public route if _, err := createOrUpdate(ctx, r.Client, oauthExternalPublicRoute, func() error { hostname := "" if serviceStrategy.Route != nil { hostname = serviceStrategy.Route.Hostname } - return oauth.ReconcileExternalPublicRoute(oauthExternalPublicRoute, p.OwnerRef, hostname, r.DefaultIngressDomain) + return oauth.ReconcileExternalPublicRoute(oauthExternalPublicRoute, p.OwnerRef, hostname, r.DefaultIngressDomain, useHCPRouter(hcp)) }); err != nil { return fmt.Errorf("failed to reconcile OAuth external public route: %w", err) } } else { - // Remove the external route if it exists - err := r.Get(ctx, client.ObjectKeyFromObject(oauthExternalPublicRoute), oauthExternalPublicRoute) + // Remove the external public route if it exists + _, err := util.DeleteIfNeeded(ctx, r.Client, oauthExternalPublicRoute) if err != nil { - if !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to check whether OAuth external public route exists: %w", err) + return fmt.Errorf("failed to delete OAuth external public route: %w", err) + } + + // Reconcile the external private route if a hostname is specified + if serviceStrategy.Route != nil && serviceStrategy.Route.Hostname != "" { + if _, err := createOrUpdate(ctx, r.Client, oauthExternalPrivateRoute, func() error { + return oauth.ReconcileExternalPrivateRoute(oauthExternalPrivateRoute, p.OwnerRef, serviceStrategy.Route.Hostname, r.DefaultIngressDomain, useHCPRouter(hcp)) + }); err != nil { + return fmt.Errorf("failed to reconcile OAuth external private route: %w", err) } } else { - if err := r.Delete(ctx, oauthExternalPublicRoute); err != nil { - return fmt.Errorf("failed to delete OAuth external public route: %w", err) - } - } - // Reconcile the external private route - if _, err := createOrUpdate(ctx, r.Client, oauthExternalPrivateRoute, func() error 
{ - hostname := "" - if serviceStrategy.Route != nil { - hostname = serviceStrategy.Route.Hostname + // Remove the external private route if it exists when hostname is not specified + _, err := util.DeleteIfNeeded(ctx, r.Client, oauthExternalPrivateRoute) + if err != nil { + return fmt.Errorf("failed to delete OAuth external private route: %w", err) } - return oauth.ReconcileExternalPrivateRoute(oauthExternalPrivateRoute, p.OwnerRef, hostname, r.DefaultIngressDomain) - }); err != nil { - return fmt.Errorf("failed to reconcile OAuth external private route: %w", err) } } if util.IsPrivateHCP(hcp) { @@ -1631,8 +1671,12 @@ func (r *HostedControlPlaneReconciler) reconcileAPIServerServiceStatus(ctx conte } kasSVCLBPort := config.KASSVCPort - if serviceStrategy.Type == hyperv1.LoadBalancer && hcp.Spec.Platform.Type == hyperv1.AzurePlatform { - // If Azure we get the SVC handling the LB. + if hcp.Spec.Platform.Type == hyperv1.IBMCloudPlatform { + kasSVCLBPort = config.KASSVCIBMCloudPort + } + if serviceStrategy.Type == hyperv1.LoadBalancer && (hcp.Spec.Platform.Type == hyperv1.AzurePlatform || + hcp.Annotations[hyperv1.ManagementPlatformAnnotation] == string(hyperv1.AzurePlatform)) { + // If Azure or Kubevirt on Azure we get the SVC handling the LB. // TODO(alberto): remove this hack when having proper traffic management for Azure. 
kasSVCLBPort = config.KASSVCLBAzurePort svc = manifests.KubeAPIServerServiceAzureLB(hcp.Namespace) @@ -1707,7 +1751,7 @@ func (r *HostedControlPlaneReconciler) reconcileOAuthServiceStatus(ctx context.C err = fmt.Errorf("failed to get oauth external route: %w", err) return } - } else { + } else if serviceStrategy.Route != nil && serviceStrategy.Route.Hostname != "" { route = manifests.OauthServerExternalPrivateRoute(hcp.Namespace) if err = r.Get(ctx, client.ObjectKeyFromObject(route), route); err != nil { if apierrors.IsNotFound(err) { @@ -1717,6 +1761,16 @@ func (r *HostedControlPlaneReconciler) reconcileOAuthServiceStatus(ctx context.C err = fmt.Errorf("failed to get oauth internal route: %w", err) return } + } else { + route = manifests.OauthServerInternalRoute(hcp.Namespace) + if err = r.Get(ctx, client.ObjectKeyFromObject(route), route); err != nil { + if apierrors.IsNotFound(err) { + err = nil + return + } + err = fmt.Errorf("failed to get oauth internal route: %w", err) + return + } } } return oauth.ReconcileServiceStatus(svc, route, serviceStrategy) @@ -2220,6 +2274,32 @@ func (r *HostedControlPlaneReconciler) reconcileManagedEtcd(ctx context.Context, r.Log.Info("reconciled etcd pdb", "result", result) } + // reconcile etcd-defrag-operator serviceAccount, role and roleBinding + if p.DeploymentConfig.Replicas > 1 { + sa := manifests.EtcdDefragControllerServiceAccount(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, sa, func() error { + p.OwnerRef.ApplyTo(sa) + util.EnsurePullSecret(sa, common.PullSecret(hcp.Namespace).Name) + return nil + }); err != nil { + return fmt.Errorf("failed to reconcile etcd-defrag-operator service account: %w", err) + } + + role := manifests.EtcdDefragControllerRole(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, role, func() error { + return etcd.ReconcileDefragControllerRole(role, p) + }); err != nil { + return fmt.Errorf("failed to reconcile etcd-defrag-operator role: %w", err) + } + + roleBinding := 
manifests.EtcdDefragControllerRoleBinding(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, roleBinding, func() error { + return etcd.ReconcileDefragControllerRoleBinding(roleBinding, p) + }); err != nil { + return fmt.Errorf("failed to reconcile etcd-defrag-operator role binding: %w", err) + } + } + if result, err := createOrUpdate(ctx, r, statefulSet, func() error { return etcd.ReconcileStatefulSet(statefulSet, p) }); err != nil { @@ -2317,7 +2397,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeAPIServer(ctx context.Contex serviceKubeconfigSecret := manifests.KASServiceKubeconfigSecret(hcp.Namespace) if _, err := createOrUpdate(ctx, r, serviceKubeconfigSecret, func() error { - return kas.ReconcileServiceKubeconfigSecret(serviceKubeconfigSecret, clientCertSecret, rootCA, p.OwnerRef) + return kas.ReconcileServiceKubeconfigSecret(serviceKubeconfigSecret, clientCertSecret, rootCA, p.OwnerRef, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile service admin kubeconfig secret: %w", err) } @@ -2328,7 +2408,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeAPIServer(ctx context.Contex if _, err := createOrUpdate(ctx, r, capiKubeconfigSecret, func() error { // TODO(alberto): This secret is currently using the cluster-admin kubeconfig for the guest cluster. // We should create a separate kubeconfig with a tight set of permissions for it to use. 
- return kas.ReconcileServiceCAPIKubeconfigSecret(capiKubeconfigSecret, clientCertSecret, rootCA, p.OwnerRef, hcp.Spec.InfraID) + return kas.ReconcileServiceCAPIKubeconfigSecret(capiKubeconfigSecret, clientCertSecret, rootCA, p.OwnerRef, hcp.Spec.InfraID, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile CAPI service admin kubeconfig secret: %w", err) } @@ -2342,6 +2422,9 @@ func (r *HostedControlPlaneReconciler) reconcileKubeAPIServer(ctx context.Contex externalKubeconfigSecret := manifests.KASExternalKubeconfigSecret(hcp.Namespace, hcp.Spec.KubeConfig) if _, err := createOrUpdate(ctx, r, externalKubeconfigSecret, func() error { + if !util.IsPublicHCP(hcp) && !util.IsRouteKAS(hcp) { + return kas.ReconcileExternalKubeconfigSecret(externalKubeconfigSecret, clientCertSecret, rootCA, p.OwnerRef, p.InternalURL(), p.ExternalKubeconfigKey()) + } return kas.ReconcileExternalKubeconfigSecret(externalKubeconfigSecret, clientCertSecret, rootCA, p.OwnerRef, p.ExternalURL(), p.ExternalKubeconfigKey()) }); err != nil { return fmt.Errorf("failed to reconcile external kubeconfig secret: %w", err) @@ -2375,6 +2458,13 @@ func (r *HostedControlPlaneReconciler) reconcileKubeAPIServer(ctx context.Contex return fmt.Errorf("failed to reconcile api server config: %w", err) } + kubeAPIServerAuthConfig := manifests.AuthConfig(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, kubeAPIServerAuthConfig, func() error { + return kas.ReconcileAuthConfig(ctx, r, kubeAPIServerAuthConfig, p.OwnerRef, p.ConfigParams()) + }); err != nil { + return fmt.Errorf("failed to reconcile api server authentication config: %w", err) + } + kubeAPIServerEgressSelectorConfig := manifests.KASEgressSelectorConfig(hcp.Namespace) if _, err := createOrUpdate(ctx, r, kubeAPIServerEgressSelectorConfig, func() error { return kas.ReconcileEgressSelectorConfig(kubeAPIServerEgressSelectorConfig, p.OwnerRef) @@ -2382,9 +2472,26 @@ func (r *HostedControlPlaneReconciler) 
reconcileKubeAPIServer(ctx context.Contex return fmt.Errorf("failed to reconcile api server egress selector config: %w", err) } + userOauthMetadata := "" + if hcp.Spec.Configuration != nil && + hcp.Spec.Configuration.Authentication != nil && + len(hcp.Spec.Configuration.Authentication.OAuthMetadata.Name) > 0 { + var userOauthMetadataConfigMap corev1.ConfigMap + err := r.Client.Get(ctx, client.ObjectKey{Namespace: hcp.Namespace, Name: hcp.Spec.Configuration.Authentication.OAuthMetadata.Name}, &userOauthMetadataConfigMap) + if err != nil { + return fmt.Errorf("failed to get user oauth metadata configmap: %w", err) + } + if userOauthMetadataConfigMap.Data == nil || len(userOauthMetadataConfigMap.Data) == 0 { + return fmt.Errorf("user oauth metadata configmap %s has no data", userOauthMetadataConfigMap.Name) + } + var ok bool + if userOauthMetadata, ok = userOauthMetadataConfigMap.Data["oauthMetadata"]; !ok { + return fmt.Errorf("user oauth metadata configmap %s has no oauthMetadata key", userOauthMetadataConfigMap.Name) + } + } oauthMetadata := manifests.KASOAuthMetadata(hcp.Namespace) if _, err := createOrUpdate(ctx, r, oauthMetadata, func() error { - return kas.ReconcileOauthMetadata(oauthMetadata, p.OwnerRef, p.ExternalOAuthAddress, p.ExternalOAuthPort) + return kas.ReconcileOauthMetadata(oauthMetadata, p.OwnerRef, userOauthMetadata, p.ExternalOAuthAddress, p.ExternalOAuthPort) }); err != nil { return fmt.Errorf("failed to reconcile oauth metadata: %w", err) } @@ -2506,7 +2613,15 @@ func (r *HostedControlPlaneReconciler) reconcileKubeAPIServer(ctx context.Contex if !util.HCPOAuthEnabled(hcp) && len(hcp.Spec.Configuration.Authentication.OIDCProviders) != 0 && hcp.Spec.Configuration.Authentication.OIDCProviders[0].Issuer.CertificateAuthority.Name != "" { - oidcCA = &corev1.LocalObjectReference{Name: manifests.OIDCCAConfigMap("").Name} + // This is needed for version skew between HO and CPO. Older versions of the HO wrote the CA to a fixed + // oidc-ca configmap. 
Newer versions just copy the configmap with its original name. + name := hcp.Spec.Configuration.Authentication.OIDCProviders[0].Issuer.CertificateAuthority.Name + err := r.Get(ctx, client.ObjectKey{Namespace: hcp.Namespace, Name: name}, &corev1.ConfigMap{}) + if err != nil { + oidcCA = &corev1.LocalObjectReference{Name: manifests.OIDCCAConfigMap("").Name} + } else { + oidcCA = &corev1.LocalObjectReference{Name: name} + } } if _, err := createOrUpdate(ctx, r, kubeAPIServerDeployment, func() error { @@ -2521,6 +2636,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeAPIServer(ctx context.Contex p.Images, kubeAPIServerConfig, kubeAPIServerAuditConfig, + kubeAPIServerAuthConfig, p.AuditWebhookRef, aesCBCActiveKey, aesCBCBackupKey, @@ -2528,6 +2644,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeAPIServer(ctx context.Contex userReleaseImageProvider.Version(), p.FeatureGate, oidcCA, + p.CipherSuites(), ) }); err != nil { return fmt.Errorf("failed to reconcile api server deployment: %w", err) @@ -2562,7 +2679,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeControllerManager(ctx contex recyclerConfig := manifests.RecyclerConfigMap(hcp.Namespace) if _, err := createOrUpdate(ctx, r, recyclerConfig, func() error { - return kcm.ReconcileRecyclerConfig(recyclerConfig, p.OwnerRef) + return kcm.ReconcileRecyclerConfig(recyclerConfig, p.OwnerRef, releaseImageProvider) }); err != nil { return fmt.Errorf("failed to reconcile kcm recycler config: %w", err) } @@ -2579,6 +2696,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeControllerManager(ctx contex clientCertSecret, rootCAConfigMap, p.OwnerRef, + hcp.Spec.Platform.Type, ) }); err != nil { return fmt.Errorf("failed to reconcile secret '%s/%s': %v", kcmKubeconfigSecret.Namespace, kcmKubeconfigSecret.Name, err) @@ -2601,7 +2719,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeControllerManager(ctx contex } if _, err := createOrUpdate(ctx, r, kcmDeployment, func() error { - return 
kcm.ReconcileDeployment(kcmDeployment, kcmConfig, rootCAConfigMap, serviceServingCA, p) + return kcm.ReconcileDeployment(kcmDeployment, kcmConfig, rootCAConfigMap, serviceServingCA, p, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile kcm deployment: %w", err) } @@ -2629,6 +2747,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeScheduler(ctx context.Contex clientCertSecret, rootCA, p.OwnerRef, + hcp.Spec.Platform.Type, ) }); err != nil { return fmt.Errorf("failed to reconcile secret '%s/%s': %v", schedulerKubeconfigSecret.Namespace, schedulerKubeconfigSecret.Name, err) @@ -2642,7 +2761,7 @@ func (r *HostedControlPlaneReconciler) reconcileKubeScheduler(ctx context.Contex } if _, err := createOrUpdate(ctx, r, schedulerDeployment, func() error { - return scheduler.ReconcileDeployment(schedulerDeployment, p.OwnerRef, p.DeploymentConfig, p.HyperkubeImage, p.FeatureGates(), p.SchedulerPolicy(), p.AvailabilityProberImage, p.CipherSuites(), p.MinTLSVersion(), p.DisableProfiling, schedulerConfig) + return scheduler.ReconcileDeployment(schedulerDeployment, p.OwnerRef, p.DeploymentConfig, p.HyperkubeImage, p.FeatureGates(), p.SchedulerPolicy(), p.AvailabilityProberImage, p.CipherSuites(), p.MinTLSVersion(), p.DisableProfiling, schedulerConfig, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile scheduler deployment: %w", err) } @@ -2692,8 +2811,18 @@ func (r *HostedControlPlaneReconciler) reconcileOpenShiftAPIServer(ctx context.C } } + noProxy := proxy.DefaultNoProxy(hcp) + + var imageRegistryAdditionalTrustedCAs *corev1.ConfigMap + if hcp.Spec.Configuration != nil && hcp.Spec.Configuration.Image != nil && hcp.Spec.Configuration.Image.AdditionalTrustedCA.Name != "" { + imageRegistryAdditionalTrustedCAs = &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: hcp.Spec.Configuration.Image.AdditionalTrustedCA.Name, Namespace: hcp.Namespace}} + if err := r.Get(ctx, 
client.ObjectKeyFromObject(imageRegistryAdditionalTrustedCAs), imageRegistryAdditionalTrustedCAs); err != nil { + return fmt.Errorf("failed to get image registry additional trusted CA configmap: %w", err) + } + } + if _, err := createOrUpdate(ctx, r, deployment, func() error { - return oapi.ReconcileDeployment(deployment, p.AuditWebhookRef, p.OwnerRef, oapicfg, auditCfg, serviceServingCA, p.OpenShiftAPIServerDeploymentConfig, p.OpenShiftAPIServerImage, p.ProxyImage, p.EtcdURL, p.AvailabilityProberImage, p.InternalOAuthDisable) + return oapi.ReconcileDeployment(deployment, p.AuditWebhookRef, p.OwnerRef, oapicfg, auditCfg, serviceServingCA, p.OpenShiftAPIServerDeploymentConfig, p.OpenShiftAPIServerImage, p.ProxyImage, p.EtcdURL, p.AvailabilityProberImage, p.InternalOAuthDisable, hcp.Spec.Platform.Type, hcp.Spec.AdditionalTrustBundle, imageRegistryAdditionalTrustedCAs, hcp.Spec.Configuration, p.Proxy, noProxy) }); err != nil { return fmt.Errorf("failed to reconcile openshift apiserver deployment: %w", err) } @@ -2721,7 +2850,7 @@ func (r *HostedControlPlaneReconciler) reconcileOpenShiftOAuthAPIServer(ctx cont deployment := manifests.OpenShiftOAuthAPIServerDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, deployment, func() error { - return oapi.ReconcileOAuthAPIServerDeployment(deployment, p.OwnerRef, auditCfg, p.OAuthAPIServerDeploymentParams(hcp)) + return oapi.ReconcileOAuthAPIServerDeployment(deployment, p.OwnerRef, auditCfg, p.OAuthAPIServerDeploymentParams(hcp), hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile openshift oauth apiserver deployment: %w", err) } @@ -2780,12 +2909,44 @@ func (r *HostedControlPlaneReconciler) reconcileOAuthServer(ctx context.Context, r.Log.V(2).Info("Reconciled oauth pdb", "result", result) } + auditCfg := manifests.OAuthAuditConfig(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, auditCfg, func() error { + return oauth.ReconcileAuditConfig(auditCfg, p.OwnerRef, p.AuditPolicyConfig()) + 
}); err != nil { + return fmt.Errorf("failed to reconcile oauth openshift audit config: %w", err) + } + deployment := manifests.OAuthServerDeployment(hcp.Namespace) + clusterNoProxy := proxy.DefaultNoProxy(hcp) if _, err := createOrUpdate(ctx, r, deployment, func() error { - return oauth.ReconcileDeployment(ctx, r, deployment, p.OwnerRef, oauthConfig, p.OAuthServerImage, p.DeploymentConfig, p.IdentityProviders(), p.OauthConfigOverrides, p.AvailabilityProberImage, p.NamedCertificates(), p.Socks5ProxyImage, p.NoProxy, p.ConfigParams(oauthServingCert)) + return oauth.ReconcileDeployment(ctx, r, deployment, p.AuditWebhookRef, p.OwnerRef, oauthConfig, auditCfg, p.OAuthServerImage, p.DeploymentConfig, p.IdentityProviders(), p.OauthConfigOverrides, p.AvailabilityProberImage, p.NamedCertificates(), p.ProxyImage, p.ProxyConfig, clusterNoProxy, p.OAuthNoProxy, p.ConfigParams(oauthServingCert), hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile oauth deployment: %w", err) } + + // Report any IDP configuration errors as a condition on the HCP + new := metav1.Condition{ + Type: string(hyperv1.ValidIDPConfiguration), + Status: metav1.ConditionTrue, + Reason: "IDPConfigurationValid", + Message: "Identity provider configuration is valid", + } + if _, _, err := oauth.ConvertIdentityProviders(ctx, p.IdentityProviders(), p.OauthConfigOverrides, r, hcp.Namespace); err != nil { + // Report the error in a condition on the HCP + r.Log.Error(err, "failed to initialize identity providers") + new = metav1.Condition{ + Type: string(hyperv1.ValidIDPConfiguration), + Status: metav1.ConditionFalse, + Reason: "IDPConfigurationError", + Message: fmt.Sprintf("failed to initialize identity providers: %v", err), + } + } + // Update the condition on the HCP if it has changed + meta.SetStatusCondition(&hcp.Status.Conditions, new) + if err := r.Status().Update(ctx, hcp); err != nil { + return fmt.Errorf("failed to update valid IDP configuration condition: %w", err) + } + 
return nil } @@ -2823,8 +2984,8 @@ func (r *HostedControlPlaneReconciler) reconcileOpenShiftControllerManager(ctx c return nil } -func (r *HostedControlPlaneReconciler) reconcileOpenShiftRouteControllerManager(ctx context.Context, hcp *hyperv1.HostedControlPlane, observedConfig *globalconfig.ObservedConfig, releaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN) error { - p := routecm.NewOpenShiftRouteControllerManagerParams(hcp, observedConfig, releaseImageProvider, r.SetDefaultSecurityContext) +func (r *HostedControlPlaneReconciler) reconcileOpenShiftRouteControllerManager(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN) error { + p := routecm.NewOpenShiftRouteControllerManagerParams(hcp, releaseImageProvider, r.SetDefaultSecurityContext) config := manifests.OpenShiftRouteControllerManagerConfig(hcp.Namespace) if _, err := createOrUpdate(ctx, r, config, func() error { return routecm.ReconcileOpenShiftRouteControllerManagerConfig(config, p.OwnerRef, p.MinTLSVersion(), p.CipherSuites(), p.Network) @@ -2873,7 +3034,7 @@ func (r *HostedControlPlaneReconciler) reconcileClusterPolicyController(ctx cont deployment := manifests.ClusterPolicyControllerDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, deployment, func() error { - return clusterpolicy.ReconcileDeployment(deployment, p.OwnerRef, p.Image, p.DeploymentConfig, p.AvailabilityProberImage) + return clusterpolicy.ReconcileDeployment(deployment, p.OwnerRef, p.Image, p.DeploymentConfig, p.AvailabilityProberImage, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile cluster policy controller deployment: %w", err) } @@ -2931,7 +3092,7 @@ func (r *HostedControlPlaneReconciler) reconcileClusterVersionOperator(ctx conte return nil } -func (r *HostedControlPlaneReconciler) reconcileClusterNetworkOperator(ctx context.Context, hcp 
*hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, userReleaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN) error { +func (r *HostedControlPlaneReconciler) reconcileClusterNetworkOperator(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, userReleaseImageProvider *imageprovider.ReleaseImageProvider, hasRouteCap bool, createOrUpdate upsert.CreateOrUpdateFN) error { p := cno.NewParams(hcp, userReleaseImageProvider.Version(), releaseImageProvider, userReleaseImageProvider, r.SetDefaultSecurityContext, r.DefaultIngressDomain) sa := manifests.ClusterNetworkOperatorServiceAccount(hcp.Namespace) @@ -2957,7 +3118,7 @@ func (r *HostedControlPlaneReconciler) reconcileClusterNetworkOperator(ctx conte deployment := manifests.ClusterNetworkOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, deployment, func() error { - return cno.ReconcileDeployment(deployment, p) + return cno.ReconcileDeployment(deployment, p, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile cluster network operator deployment: %w", err) } @@ -2974,6 +3135,23 @@ func (r *HostedControlPlaneReconciler) reconcileClusterNetworkOperator(ctx conte return fmt.Errorf("failed to restart network node identity: %w", err) } + // Clean up ovnkube-sbdb Route if exists + if hasRouteCap { + if _, err := util.DeleteIfNeeded(ctx, r.Client, manifests.OVNKubeSBDBRoute(hcp.Namespace)); err != nil { + return fmt.Errorf("failed to clean up ovnkube-sbdb route: %w", err) + } + } + + // Clean up ovnkube-master-external Service if exists + if _, err := util.DeleteIfNeeded(ctx, r.Client, manifests.MasterExternalService(hcp.Namespace)); err != nil { + return fmt.Errorf("failed to clean up ovnkube-master-external service: %w", err) + } + + // Clean up ovnkube-master-internal Service if exists + if _, err := util.DeleteIfNeeded(ctx, r.Client, 
manifests.MasterInternalService(hcp.Namespace)); err != nil { + return fmt.Errorf("failed to clean up ovnkube-master-internal service: %w", err) + } + return nil } @@ -3029,26 +3207,28 @@ func (r *HostedControlPlaneReconciler) reconcileClusterNodeTuningOperator(ctx co func (r *HostedControlPlaneReconciler) reconcileDNSOperator(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, userReleaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN) error { p := dnsoperator.NewParams(hcp, userReleaseImageProvider.Version(), releaseImageProvider, userReleaseImageProvider, r.SetDefaultSecurityContext) - rootCA := manifests.RootCAConfigMap(hcp.Namespace) - if err := r.Client.Get(ctx, client.ObjectKeyFromObject(rootCA), rootCA); err != nil { - return err - } + if _, exists := hcp.Annotations[hyperv1.DisablePKIReconciliationAnnotation]; !exists { + rootCA := manifests.RootCAConfigMap(hcp.Namespace) + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(rootCA), rootCA); err != nil { + return err + } - csrSigner := manifests.CSRSignerCASecret(hcp.Namespace) - if err := r.Client.Get(ctx, client.ObjectKeyFromObject(csrSigner), csrSigner); err != nil { - return err - } + csrSigner := manifests.CSRSignerCASecret(hcp.Namespace) + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(csrSigner), csrSigner); err != nil { + return err + } - kubeconfig := manifests.DNSOperatorKubeconfig(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, kubeconfig, func() error { - return pki.ReconcileServiceAccountKubeconfig(kubeconfig, csrSigner, rootCA, hcp, "openshift-dns-operator", "dns-operator") - }); err != nil { - return fmt.Errorf("failed to reconcile dnsoperator kubeconfig: %w", err) + kubeconfig := manifests.DNSOperatorKubeconfig(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, kubeconfig, func() error { + return pki.ReconcileServiceAccountKubeconfig(kubeconfig, csrSigner, rootCA, hcp, 
"openshift-dns-operator", "dns-operator") + }); err != nil { + return fmt.Errorf("failed to reconcile dnsoperator kubeconfig: %w", err) + } } deployment := manifests.DNSOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, deployment, func() error { - dnsoperator.ReconcileDeployment(deployment, p) + dnsoperator.ReconcileDeployment(deployment, p, hcp.Spec.Platform.Type) return nil }); err != nil { return fmt.Errorf("failed to reconcile dnsoperator deployment: %w", err) @@ -3059,26 +3239,28 @@ func (r *HostedControlPlaneReconciler) reconcileDNSOperator(ctx context.Context, func (r *HostedControlPlaneReconciler) reconcileIngressOperator(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, userReleaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN) error { p := ingressoperator.NewParams(hcp, userReleaseImageProvider.Version(), releaseImageProvider, userReleaseImageProvider, r.SetDefaultSecurityContext, hcp.Spec.Platform.Type) - rootCA := manifests.RootCAConfigMap(hcp.Namespace) - if err := r.Client.Get(ctx, client.ObjectKeyFromObject(rootCA), rootCA); err != nil { - return err - } + if _, exists := hcp.Annotations[hyperv1.DisablePKIReconciliationAnnotation]; !exists { + rootCA := manifests.RootCAConfigMap(hcp.Namespace) + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(rootCA), rootCA); err != nil { + return err + } - csrSigner := manifests.CSRSignerCASecret(hcp.Namespace) - if err := r.Client.Get(ctx, client.ObjectKeyFromObject(csrSigner), csrSigner); err != nil { - return err - } + csrSigner := manifests.CSRSignerCASecret(hcp.Namespace) + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(csrSigner), csrSigner); err != nil { + return err + } - kubeconfig := manifests.IngressOperatorKubeconfig(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, kubeconfig, func() error { - return pki.ReconcileServiceAccountKubeconfig(kubeconfig, csrSigner, 
rootCA, hcp, "openshift-ingress-operator", "ingress-operator") - }); err != nil { - return fmt.Errorf("failed to reconcile ingressoperator kubeconfig: %w", err) + kubeconfig := manifests.IngressOperatorKubeconfig(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, kubeconfig, func() error { + return pki.ReconcileServiceAccountKubeconfig(kubeconfig, csrSigner, rootCA, hcp, "openshift-ingress-operator", "ingress-operator") + }); err != nil { + return fmt.Errorf("failed to reconcile ingressoperator kubeconfig: %w", err) + } } deployment := manifests.IngressOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, deployment, func() error { - ingressoperator.ReconcileDeployment(deployment, p) + ingressoperator.ReconcileDeployment(deployment, p, hcp.Spec.Platform.Type) return nil }); err != nil { return fmt.Errorf("failed to reconcile ingressoperator deployment: %w", err) @@ -3095,77 +3277,98 @@ func (r *HostedControlPlaneReconciler) reconcileIngressOperator(ctx context.Cont return nil } +func (r *HostedControlPlaneReconciler) reconcileCloudCredentialOperator(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN) error { + params := cco.NewParams(hcp, releaseImageProvider.Version(), releaseImageProvider, r.SetDefaultSecurityContext) + + rootCA := manifests.RootCAConfigMap(hcp.Namespace) + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(rootCA), rootCA); err != nil { + return err + } + + csrSigner := manifests.CSRSignerCASecret(hcp.Namespace) + if err := r.Client.Get(ctx, client.ObjectKeyFromObject(csrSigner), csrSigner); err != nil { + return err + } + + kubeconfig := manifests.CloudCredentialOperatorKubeconfig(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, kubeconfig, func() error { + return pki.ReconcileServiceAccountKubeconfig(kubeconfig, csrSigner, rootCA, hcp, cco.WorkerNamespace, cco.WorkerServiceAccount) + }); err != nil { + return 
fmt.Errorf("failed to reconcile cloud credential operator kubeconfig: %w", err) + } + + deployment := manifests.CloudCredentialOperatorDeployment(hcp.Namespace) + if _, err := createOrUpdate(ctx, r, deployment, func() error { + return cco.ReconcileDeployment(deployment, params, hcp.Spec.Platform.Type) + }); err != nil { + return fmt.Errorf("failed to reconcile cloud credential operator deployment: %w", err) + } + + return nil +} + func (r *HostedControlPlaneReconciler) reconcileOperatorLifecycleManager(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, userReleaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN) error { p := olm.NewOperatorLifecycleManagerParams(hcp, releaseImageProvider, userReleaseImageProvider.Version(), r.SetDefaultSecurityContext) - - if hcp.Spec.OLMCatalogPlacement == hyperv1.ManagementOLMCatalogPlacement { - overrideImages, err := checkCatalogImageOverides(p.CertifiedOperatorsCatalogImageOverride, p.CommunityOperatorsCatalogImageOverride, p.RedHatMarketplaceCatalogImageOverride, p.RedHatOperatorsCatalogImageOverride) - if err != nil { - return fmt.Errorf("failed to reconcile catalogs: %w", err) + if (hcp.Spec.Configuration != nil && hcp.Spec.Configuration.OperatorHub != nil && + hcp.Spec.Configuration.OperatorHub.DisableAllDefaultSources) || + hcp.Spec.OLMCatalogPlacement != hyperv1.ManagementOLMCatalogPlacement { + // Disable all default sources + olmServices := olm.OLMServices(hcp.Namespace) + for _, svc := range olmServices { + if err := r.Client.Delete(ctx, svc.Manifest); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete %s service on OLM reconcile: %w", svc.Name, err) + } + } + } + olmDeployments := olm.OLMDeployments(p, hcp.Namespace) + for _, dep := range olmDeployments { + if _, err := util.DeleteIfNeeded(ctx, r.Client, dep.Manifest); err != nil { + if !apierrors.IsNotFound(err) { + return 
fmt.Errorf("failed to delete %s deployment on OLM reconcile: %w", dep.Name, err) + } + } } - catalogsImageStream := manifests.CatalogsImageStream(hcp.Namespace) - if !overrideImages { - isImageRegistryOverrides := util.ConvertImageRegistryOverrideStringToMap(p.OLMCatalogsISRegistryOverridesAnnotation) - if _, err := createOrUpdate(ctx, r, catalogsImageStream, func() error { - return olm.ReconcileCatalogsImageStream(catalogsImageStream, p.OwnerRef, isImageRegistryOverrides) - }); err != nil { - return fmt.Errorf("failed to reconcile catalogs image stream: %w", err) + } else { + // Enable all default sources + if hcp.Spec.OLMCatalogPlacement == hyperv1.ManagementOLMCatalogPlacement { + overrideImages, err := checkCatalogImageOverides(p.CertifiedOperatorsCatalogImageOverride, p.CommunityOperatorsCatalogImageOverride, p.RedHatMarketplaceCatalogImageOverride, p.RedHatOperatorsCatalogImageOverride) + if err != nil { + return fmt.Errorf("failed to reconcile catalogs: %w", err) } - } else { - if _, err := util.DeleteIfNeeded(ctx, r, catalogsImageStream); err != nil { - return fmt.Errorf("failed to remove OLM Catalog ImageStream: %w", err) + + catalogsImageStream := manifests.CatalogsImageStream(hcp.Namespace) + if !overrideImages { + isImageRegistryOverrides := util.ConvertImageRegistryOverrideStringToMap(p.OLMCatalogsISRegistryOverridesAnnotation) + if _, err := createOrUpdate(ctx, r, catalogsImageStream, func() error { + return olm.ReconcileCatalogsImageStream(catalogsImageStream, p.OwnerRef, isImageRegistryOverrides) + }); err != nil { + return fmt.Errorf("failed to reconcile catalogs image stream: %w", err) + } + } else { + if _, err := util.DeleteIfNeeded(ctx, r, catalogsImageStream); err != nil { + return fmt.Errorf("failed to remove OLM Catalog ImageStream: %w", err) + } } - } - certifiedOperatorsService := manifests.CertifiedOperatorsService(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, certifiedOperatorsService, func() error { - return 
olm.ReconcileCertifiedOperatorsService(certifiedOperatorsService, p.OwnerRef) - }); err != nil { - return fmt.Errorf("failed to reconcile certified operators service: %w", err) - } - communityOperatorsService := manifests.CommunityOperatorsService(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, communityOperatorsService, func() error { - return olm.ReconcileCommunityOperatorsService(communityOperatorsService, p.OwnerRef) - }); err != nil { - return fmt.Errorf("failed to reconcile community operators service: %w", err) - } - marketplaceOperatorsService := manifests.RedHatMarketplaceOperatorsService(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, marketplaceOperatorsService, func() error { - return olm.ReconcileRedHatMarketplaceOperatorsService(marketplaceOperatorsService, p.OwnerRef) - }); err != nil { - return fmt.Errorf("failed to reconcile marketplace operators service: %w", err) - } - redHatOperatorsService := manifests.RedHatOperatorsService(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, redHatOperatorsService, func() error { - return olm.ReconcileRedHatOperatorsService(redHatOperatorsService, p.OwnerRef) - }); err != nil { - return fmt.Errorf("failed to reconcile red hat operators service: %w", err) - } + olmServices := olm.OLMServices(hcp.Namespace) + for _, svc := range olmServices { + if _, err := createOrUpdate(ctx, r, svc.Manifest, func() error { + return svc.Reconciler(svc.Manifest, p.OwnerRef) + }); err != nil { + return fmt.Errorf("failed to reconcile %s service: %w", svc.Name, err) + } + } - certifiedOperatorsDeployment := manifests.CertifiedOperatorsDeployment(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, certifiedOperatorsDeployment, func() error { - return olm.ReconcileCertifiedOperatorsDeployment(certifiedOperatorsDeployment, p.OwnerRef, p.DeploymentConfig, p.CertifiedOperatorsCatalogImageOverride) - }); err != nil { - return fmt.Errorf("failed to reconcile certified operators deployment: %w", err) - } - 
communityOperatorsDeployment := manifests.CommunityOperatorsDeployment(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, communityOperatorsDeployment, func() error { - return olm.ReconcileCommunityOperatorsDeployment(communityOperatorsDeployment, p.OwnerRef, p.DeploymentConfig, p.CommunityOperatorsCatalogImageOverride) - }); err != nil { - return fmt.Errorf("failed to reconcile community operators deployment: %w", err) - } - marketplaceOperatorsDeployment := manifests.RedHatMarketplaceOperatorsDeployment(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, marketplaceOperatorsDeployment, func() error { - return olm.ReconcileRedHatMarketplaceOperatorsDeployment(marketplaceOperatorsDeployment, p.OwnerRef, p.DeploymentConfig, p.RedHatMarketplaceCatalogImageOverride) - }); err != nil { - return fmt.Errorf("failed to reconcile marketplace operators deployment: %w", err) - } - redHatOperatorsDeployment := manifests.RedHatOperatorsDeployment(hcp.Namespace) - if _, err := createOrUpdate(ctx, r, redHatOperatorsDeployment, func() error { - return olm.ReconcileRedHatOperatorsDeployment(redHatOperatorsDeployment, p.OwnerRef, p.DeploymentConfig, p.RedHatOperatorsCatalogImageOverride) - }); err != nil { - return fmt.Errorf("failed to reconcile red hat operators deployment: %w", err) + olmDeployments := olm.OLMDeployments(p, hcp.Namespace) + for _, dep := range olmDeployments { + if _, err := createOrUpdate(ctx, r, dep.Manifest, func() error { + return dep.Reconciler(dep.Manifest, p.OwnerRef, p.DeploymentConfig, dep.Image) + }); err != nil { + return fmt.Errorf("failed to reconcile %s deployment with image %s: %w", dep.Name, dep.Image, err) + } + } } } @@ -3186,7 +3389,7 @@ func (r *HostedControlPlaneReconciler) reconcileOperatorLifecycleManager(ctx con } catalogOperatorDeployment := manifests.CatalogOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, catalogOperatorDeployment, func() error { - return 
olm.ReconcileCatalogOperatorDeployment(catalogOperatorDeployment, p.OwnerRef, p.OLMImage, p.ProxyImage, p.OperatorRegistryImage, p.ReleaseVersion, p.DeploymentConfig, p.AvailabilityProberImage, p.NoProxy) + return olm.ReconcileCatalogOperatorDeployment(catalogOperatorDeployment, p.OwnerRef, p.OLMImage, p.ProxyImage, p.OperatorRegistryImage, p.ReleaseVersion, p.DeploymentConfig, p.AvailabilityProberImage, p.NoProxy, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile catalog operator deployment: %w", err) } @@ -3209,14 +3412,14 @@ func (r *HostedControlPlaneReconciler) reconcileOperatorLifecycleManager(ctx con olmOperatorDeployment := manifests.OLMOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, olmOperatorDeployment, func() error { - return olm.ReconcileOLMOperatorDeployment(olmOperatorDeployment, p.OwnerRef, p.OLMImage, p.ProxyImage, p.ReleaseVersion, p.DeploymentConfig, p.AvailabilityProberImage, p.NoProxy) + return olm.ReconcileOLMOperatorDeployment(olmOperatorDeployment, p.OwnerRef, p.OLMImage, p.ProxyImage, p.ReleaseVersion, p.DeploymentConfig, p.AvailabilityProberImage, p.NoProxy, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile olm operator deployment: %w", err) } packageServerDeployment := manifests.OLMPackageServerDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, packageServerDeployment, func() error { - return olm.ReconcilePackageServerDeployment(packageServerDeployment, p.OwnerRef, p.OLMImage, p.ProxyImage, p.ReleaseVersion, p.PackageServerConfig, p.AvailabilityProberImage, p.NoProxy) + return olm.ReconcilePackageServerDeployment(packageServerDeployment, p.OwnerRef, p.OLMImage, p.ProxyImage, p.ReleaseVersion, p.PackageServerConfig, p.AvailabilityProberImage, p.NoProxy, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile packageserver deployment: %w", err) } @@ -3323,6 +3526,7 @@ func checkCatalogImageOverides(images ...string) (bool, 
error) { func (r *HostedControlPlaneReconciler) reconcileImageRegistryOperator(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, userReleaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN) error { params := registryoperator.NewParams(hcp, userReleaseImageProvider.Version(), releaseImageProvider, userReleaseImageProvider, r.SetDefaultSecurityContext) + deployment := manifests.ImageRegistryOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, deployment, func() error { return registryoperator.ReconcileDeployment(deployment, params) @@ -3455,18 +3659,6 @@ func (r *HostedControlPlaneReconciler) reconcileCoreIgnitionConfig(ctx context.C return nil } -func (r *HostedControlPlaneReconciler) reconcilePrivateIngressController(ctx context.Context, hcp *hyperv1.HostedControlPlane) error { - ic := manifests.IngressPrivateIngressController(hcp.Namespace) - if err := r.Get(ctx, client.ObjectKeyFromObject(ic), ic); err == nil { - if err = r.Delete(ctx, ic); err != nil { - return fmt.Errorf("failed to delete private ingress controller: %w", err) - } - } else if !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to get private ingress controller: %w", err) - } - return nil -} - func (r *HostedControlPlaneReconciler) reconcileRouter(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, createOrUpdate upsert.CreateOrUpdateFN, exposeKASThroughRouter bool, privateRouterHost, externalRouterHost string) error { routeList := &routev1.RouteList{} if err := r.List(ctx, routeList, client.InNamespace(hcp.Namespace)); err != nil { @@ -3509,6 +3701,14 @@ func (r *HostedControlPlaneReconciler) reconcileRouter(ctx context.Context, hcp }); err != nil { return fmt.Errorf("failed to reconcile router deployment: %w", err) } + + pdb := manifests.RouterPodDisruptionBudget(hcp.Namespace) + if _, err := createOrUpdate(ctx, 
r.Client, pdb, func() error { + ingress.ReconcileRouterPodDisruptionBudget(pdb, hcp.Spec.ControllerAvailabilityPolicy, config.OwnerRefFrom(hcp)) + return nil + }); err != nil { + return fmt.Errorf("failed to reconcile router pod disruption budget: %w", err) + } } // "Admit" routes that we manage so that other code depending on routes continues @@ -3613,7 +3813,7 @@ func (r *HostedControlPlaneReconciler) reconcileControlPlanePKIOperator(ctx cont deployment := manifests.PKIOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r.Client, deployment, func() error { - return pkioperator.ReconcileDeployment(deployment, openShiftTrustedCABundleConfigMapForCPOExists, hcp, releaseImageProvider.GetImage("hypershift"), r.SetDefaultSecurityContext, sa, certRotationScale) + return pkioperator.ReconcileDeployment(deployment, openShiftTrustedCABundleConfigMapForCPOExists, hcp, releaseImageProvider.GetImage(util.CPPKIOImageName), r.SetDefaultSecurityContext, sa, certRotationScale) }); err != nil { return fmt.Errorf("failed to reconcile control plane pki operator deployment: %w", err) } @@ -4018,7 +4218,7 @@ func (r *HostedControlPlaneReconciler) reconcileCSISnapshotControllerOperator(ct deployment := manifests.CSISnapshotControllerOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, deployment, func() error { - return snapshotcontroller.ReconcileOperatorDeployment(deployment, params) + return snapshotcontroller.ReconcileOperatorDeployment(deployment, params, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile CSI snapshot controller operator deployment: %w", err) } @@ -4054,7 +4254,7 @@ func (r *HostedControlPlaneReconciler) reconcileClusterStorageOperator(ctx conte deployment := manifests.ClusterStorageOperatorDeployment(hcp.Namespace) if _, err := createOrUpdate(ctx, r, deployment, func() error { - return storage.ReconcileOperatorDeployment(deployment, params) + return storage.ReconcileOperatorDeployment(deployment, 
params, hcp.Spec.Platform.Type) }); err != nil { return fmt.Errorf("failed to reconcile cluster storage operator deployment: %w", err) } @@ -4167,7 +4367,7 @@ func (r *HostedControlPlaneReconciler) reconcileDefaultSecurityGroup(ctx context originalHCP := hcp.DeepCopy() var condition *metav1.Condition - sgID, creationErr := createAWSDefaultSecurityGroup(ctx, r.ec2Client, hcp.Spec.InfraID, hcp.Spec.Platform.AWS.CloudProviderConfig.VPC, hcp.Spec.Platform.AWS.ResourceTags) + sgID, creationErr := createAWSDefaultSecurityGroup(ctx, r.ec2Client, hcp) if creationErr != nil { condition = &metav1.Condition{ Type: string(hyperv1.AWSDefaultSecurityGroupCreated), @@ -4214,10 +4414,16 @@ func awsSecurityGroupName(infraID string) string { return fmt.Sprintf("%s-default-sg", infraID) } -func createAWSDefaultSecurityGroup(ctx context.Context, ec2Client ec2iface.EC2API, infraID, vpcID string, additionalTags []hyperv1.AWSResourceTag) (string, error) { +func createAWSDefaultSecurityGroup(ctx context.Context, ec2Client ec2iface.EC2API, hcp *hyperv1.HostedControlPlane) (string, error) { logger := ctrl.LoggerFrom(ctx) - // Determine VPC cidr + var ( + vpcID = hcp.Spec.Platform.AWS.CloudProviderConfig.VPC + infraID = hcp.Spec.InfraID + additionalTags = hcp.Spec.Platform.AWS.ResourceTags + ) + + // Validate VPC exists vpcResult, err := ec2Client.DescribeVpcsWithContext(ctx, &ec2.DescribeVpcsInput{ VpcIds: []*string{awssdk.String(vpcID)}, }) @@ -4228,10 +4434,19 @@ func createAWSDefaultSecurityGroup(ctx context.Context, ec2Client ec2iface.EC2AP if len(vpcResult.Vpcs) == 0 { return "", fmt.Errorf("vpc %s not found", vpcID) } - vpcCIDR := awssdk.StringValue(vpcResult.Vpcs[0].CidrBlock) + + if len(hcp.Spec.Networking.MachineNetwork) == 0 { + // Should never happen + return "", errors.New("failed to extract machine CIDR while creating default security group: hostedcontrolplane.spec.networking.machineNetwork length is 0") + } + machineCIDRs := make([]string, 
len(hcp.Spec.Networking.MachineNetwork)) + for i, mNet := range hcp.Spec.Networking.MachineNetwork { + machineCIDRs[i] = mNet.CIDR.String() + } + + // Search for an existing default worker security group and create one if not found describeSGResult, err := ec2Client.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{Filters: awsSecurityGroupFilters(infraID)}) if err != nil { - logger.Error(err, "Failed to list security groups") return "", fmt.Errorf("cannot list security groups, code: %s", awsErrorCode(err)) } sgID := "" @@ -4277,7 +4492,6 @@ func createAWSDefaultSecurityGroup(ctx context.Context, ec2Client ec2iface.EC2AP }, }) if err != nil { - logger.Error(err, "Failed to create security group") return "", fmt.Errorf("failed to create security group, code: %s", awsErrorCode(err)) } sgID = awssdk.StringValue(createSGResult.GroupId) @@ -4287,27 +4501,24 @@ func createAWSDefaultSecurityGroup(ctx context.Context, ec2Client ec2iface.EC2AP GroupIds: []*string{awssdk.String(sgID)}, } if err = ec2Client.WaitUntilSecurityGroupExistsWithContext(ctx, describeSGInput); err != nil { - logger.Error(err, "Failed to wait for security group to exist") return "", fmt.Errorf("failed to find created security group (id: %s), code: %s", sgID, awsErrorCode(err)) } describeSGResult, err = ec2Client.DescribeSecurityGroups(describeSGInput) if err != nil || len(describeSGResult.SecurityGroups) == 0 { - logger.Error(err, "Failed to fetch security group", "sgID", sgID) return "", fmt.Errorf("failed to fetch security group (id: %s), code: %s", sgID, awsErrorCode(err)) } sg = describeSGResult.SecurityGroups[0] logger.Info("Created security group", "id", sgID) } - ingressPermissions := supportawsutil.DefaultWorkerSGIngressRules(vpcCIDR, sgID, awssdk.StringValue(sg.OwnerId)) + ingressPermissions := supportawsutil.DefaultWorkerSGIngressRules(machineCIDRs, sgID, awssdk.StringValue(sg.OwnerId)) _, err = ec2Client.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{ GroupId: 
awssdk.String(sgID), IpPermissions: ingressPermissions, }) if err != nil { if awsErrorCode(err) != "InvalidPermission.Duplicate" { - logger.Error(err, "Failed to set security group ingress rules") return "", fmt.Errorf("failed to set security group ingress rules, code: %s", awsErrorCode(err)) } logger.Info("WARNING: got duplicate permissions error when setting security group ingress permissions", "sgID", sgID) @@ -4315,17 +4526,19 @@ func createAWSDefaultSecurityGroup(ctx context.Context, ec2Client ec2iface.EC2AP return sgID, nil } -func (r *HostedControlPlaneReconciler) destroyAWSDefaultSecurityGroup(ctx context.Context, hcp *hyperv1.HostedControlPlane) error { +func (r *HostedControlPlaneReconciler) destroyAWSDefaultSecurityGroup(ctx context.Context, hcp *hyperv1.HostedControlPlane) (string, error) { + log := ctrl.LoggerFrom(ctx) + if hcp.Spec.Platform.Type != hyperv1.AWSPlatform { - return nil + return "", nil } describeSGResult, err := r.ec2Client.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{Filters: awsSecurityGroupFilters(hcp.Spec.InfraID)}) if err != nil { - return fmt.Errorf("cannot list security groups: %w", err) + return "", fmt.Errorf("cannot list security groups: %w", err) } if len(describeSGResult.SecurityGroups) == 0 { - return nil + return "", nil } sg := describeSGResult.SecurityGroups[0] @@ -4334,7 +4547,13 @@ func (r *HostedControlPlaneReconciler) destroyAWSDefaultSecurityGroup(ctx contex GroupId: sg.GroupId, IpPermissions: sg.IpPermissions, }); err != nil { - return fmt.Errorf("failed to revoke security group ingress permissions for %s: %w", awssdk.StringValue(sg.GroupId), err) + code := "UnknownError" + if awsErr, ok := err.(awserr.Error); ok { + code = awsErr.Code() + } + log.Error(err, "failed to revoke security group ingress permissions", "SecurityGroupID", awssdk.StringValue(sg.GroupId), "code", code) + + return code, fmt.Errorf("failed to revoke security group ingress rules: %s", code) } } @@ -4343,16 +4562,29 @@ func (r 
*HostedControlPlaneReconciler) destroyAWSDefaultSecurityGroup(ctx contex GroupId: sg.GroupId, IpPermissions: sg.IpPermissionsEgress, }); err != nil { - return fmt.Errorf("failed to revoke security group egress permissions for %s: %w", awssdk.StringValue(sg.GroupId), err) + code := "UnknownError" + if awsErr, ok := err.(awserr.Error); ok { + code = awsErr.Code() + } + log.Error(err, "failed to revoke security group egress permissions", "SecurityGroupID", awssdk.StringValue(sg.GroupId), "code", code) + + return code, fmt.Errorf("failed to revoke security group egress rules: %s", code) } } if _, err = r.ec2Client.DeleteSecurityGroupWithContext(ctx, &ec2.DeleteSecurityGroupInput{ GroupId: sg.GroupId, }); err != nil { - return fmt.Errorf("failed to delete security group %s: %w", awssdk.StringValue(sg.GroupId), err) + code := "UnknownError" + if awsErr, ok := err.(awserr.Error); ok { + code = awsErr.Code() + } + log.Error(err, "failed to delete security group", "SecurityGroupID", awssdk.StringValue(sg.GroupId), "code", code) + + return code, fmt.Errorf("failed to delete security group %s: %s", awssdk.StringValue(sg.GroupId), code) } - return nil + + return "", nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller_test.go b/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller_test.go index c6c33cc9a5..b671d4686a 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller_test.go +++ b/control-plane-operator/controllers/hostedcontrolplane/hostedcontrolplane_controller_test.go @@ -984,6 +984,7 @@ func TestEventHandling(t *testing.T) { Client: c, ManagementClusterCapabilities: &fakecapabilities.FakeSupportAllCapabilities{}, ReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{}, + UserReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{}, reconcileInfrastructureStatus: func(context.Context, *hyperv1.HostedControlPlane) (InfrastructureStatus, error) 
{ return readyInfraStatus, nil }, @@ -1318,6 +1319,7 @@ func TestNonReadyInfraTriggersRequeueAfter(t *testing.T) { Client: c, ManagementClusterCapabilities: &fakecapabilities.FakeSupportAllCapabilities{}, ReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{}, + UserReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{}, reconcileInfrastructureStatus: func(context.Context, *hyperv1.HostedControlPlane) (InfrastructureStatus, error) { return InfrastructureStatus{}, nil }, diff --git a/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver.go b/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver.go index 5906a0f57c..2af284a51a 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver.go +++ b/control-plane-operator/controllers/hostedcontrolplane/ignitionserver/ignitionserver.go @@ -46,6 +46,7 @@ func ReconcileIgnitionServer(ctx context.Context, ownerRef config.OwnerRef, openShiftTrustedCABundleConfigMapExists bool, mirroredReleaseImage string, + labelHCPRoutes bool, ) error { log := ctrl.LoggerFrom(ctx) @@ -101,7 +102,7 @@ func ReconcileIgnitionServer(ctx context.Context, if serviceStrategy.Route != nil { hostname = serviceStrategy.Route.Hostname } - err := reconcileExternalRoute(ignitionServerRoute, ownerRef, routeServiceName, hostname, defaultIngressDomain) + err := reconcileExternalRoute(ignitionServerRoute, ownerRef, routeServiceName, hostname, defaultIngressDomain, labelHCPRoutes) if err != nil { return fmt.Errorf("failed to reconcile external route in ignition server: %w", err) } @@ -183,9 +184,9 @@ func ReconcileIgnitionServer(ctx context.Context, "app": ignitionserver.ResourceName, hyperv1.ControlPlaneComponent: ignitionserver.ResourceName, } - configOperatorImage := componentImages["cluster-config-operator"] - if configOperatorImage == "" { - return fmt.Errorf("cluster-config-operator image not found in payload images") + configAPIImage := 
componentImages["cluster-config-api"] + if configAPIImage == "" { + return fmt.Errorf("cluster-config-api image not found in payload images") } machineConfigOperatorImage := componentImages["machine-config-operator"] if machineConfigOperatorImage == "" { @@ -199,7 +200,7 @@ func ReconcileIgnitionServer(ctx context.Context, // Determine if we need to override the machine config operator and cluster config operator // images based on image mappings present in management cluster. ocpRegistryMapping := util.ConvertImageRegistryOverrideStringToMap(openShiftRegistryOverrides) - overrideConfigOperatorImage, err := lookupMappedImage(ocpRegistryMapping, configOperatorImage) + overrideConfigAPIImage, err := lookupMappedImage(ocpRegistryMapping, configAPIImage) if err != nil { return err } @@ -215,8 +216,8 @@ func ReconcileIgnitionServer(ctx context.Context, } } - if overrideConfigOperatorImage != configOperatorImage { - imageOverrides[configOperatorImage] = overrideConfigOperatorImage + if overrideConfigAPIImage != configAPIImage { + imageOverrides[configAPIImage] = overrideConfigAPIImage } if overrideMachineConfigOperatorImage != machineConfigOperatorImage { @@ -228,7 +229,7 @@ func ReconcileIgnitionServer(ctx context.Context, return reconcileDeployment(ignitionServerDeployment, releaseVersion, utilitiesImage, - configOperatorImage, + configAPIImage, hcp, defaultIngressDomain, hasHealthzHandler, @@ -350,9 +351,9 @@ func reconcileIgnitionServerServiceWithProxy(svc *corev1.Service, strategy *hype return nil } -func reconcileExternalRoute(route *routev1.Route, ownerRef config.OwnerRef, svcName string, hostname string, defaultIngressDomain string) error { +func reconcileExternalRoute(route *routev1.Route, ownerRef config.OwnerRef, svcName string, hostname string, defaultIngressDomain string, labelHCPRoutes bool) error { ownerRef.ApplyTo(route) - return util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, svcName) + return util.ReconcileExternalRoute(route, 
hostname, defaultIngressDomain, svcName, labelHCPRoutes) } func reconcileInternalRoute(route *routev1.Route, ownerRef config.OwnerRef, svcName string) error { @@ -463,7 +464,7 @@ func reconcileRoleBinding(roleBinding *rbacv1.RoleBinding, role *rbacv1.Role, sa func reconcileDeployment(deployment *appsv1.Deployment, releaseVersion string, utilitiesImage string, - configOperatorImage string, + configAPIImage string, hcp *hyperv1.HostedControlPlane, defaultIngressDomain string, hasHealthzHandler bool, @@ -528,6 +529,20 @@ func reconcileDeployment(deployment *appsv1.Deployment, selectorLabels = deployment.Spec.Selector.MatchLabels } + ignitionServerResources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("40Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), + }, + } + // preserve existing resource requirements + mainContainer := util.FindContainer(ignitionserver.ResourceName, deployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + ignitionServerResources = mainContainer.Resources + } + } + deployment.Spec = appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: selectorLabels, @@ -588,7 +603,7 @@ func reconcileDeployment(deployment *appsv1.Deployment, InitContainers: []corev1.Container{ { Name: "fetch-feature-gate", - Image: configOperatorImage, + Image: configAPIImage, ImagePullPolicy: corev1.PullIfNotPresent, Command: []string{ "/bin/bash", @@ -665,12 +680,7 @@ func reconcileDeployment(deployment *appsv1.Deployment, ContainerPort: 8080, }, }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("40Mi"), - corev1.ResourceCPU: resource.MustParse("10m"), - }, - }, + Resources: ignitionServerResources, VolumeMounts: []corev1.VolumeMount{ { Name: "serving-cert", @@ -726,6 +736,9 @@ func reconcileDeployment(deployment 
*appsv1.Deployment, }, } deploymentConfig.Scheduling.PriorityClass = config.DefaultPriorityClass + if hcp.Annotations[hyperv1.ControlPlanePriorityClass] != "" { + deploymentConfig.Scheduling.PriorityClass = hcp.Annotations[hyperv1.ControlPlanePriorityClass] + } deploymentConfig.SetRestartAnnotation(hcp.ObjectMeta) deploymentConfig.SetDefaults(hcp, ignitionServerLabels, nil) deploymentConfig.ApplyTo(deployment) @@ -745,11 +758,11 @@ defaults timeout server 30s frontend ignition-server - bind :::8443 v4v6 ssl crt /tmp/tls.pem + bind :::8443 v4v6 ssl crt /tmp/tls.pem alpn http/1.1 default_backend ignition_servers backend ignition_servers - server ignition-server ignition-server:443 check ssl ca-file /etc/ssl/root-ca/ca.crt + server ignition-server ignition-server:443 check ssl ca-file /etc/ssl/root-ca/ca.crt alpn http/1.1 EOF haproxy -f /tmp/haproxy.conf ` diff --git a/control-plane-operator/controllers/hostedcontrolplane/ingress/router.go b/control-plane-operator/controllers/hostedcontrolplane/ingress/router.go index dc8175a9ed..99063f505b 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ingress/router.go +++ b/control-plane-operator/controllers/hostedcontrolplane/ingress/router.go @@ -9,6 +9,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -110,8 +111,6 @@ func generateRouterConfig(routeList *routev1.RouteList, svcsNameToIP map[string] p.Backends = append(p.Backends, backendDesc{Name: "oauth", HostName: route.Spec.Host, DestinationServiceIP: svcsNameToIP[route.Spec.To.Name], DestinationPort: 6443}) case manifests.OauthServerInternalRoute("").Name: p.Backends = append(p.Backends, backendDesc{Name: "oauth_internal", HostName: route.Spec.Host, DestinationServiceIP: svcsNameToIP[route.Spec.To.Name], DestinationPort: 6443}) - case manifests.OVNKubeSBDBRoute("").Name: - 
p.Backends = append(p.Backends, backendDesc{Name: "ovnkube_sbdb", HostName: route.Spec.Host, DestinationServiceIP: svcsNameToIP[route.Spec.To.Name], DestinationPort: route.Spec.Port.TargetPort.IntVal}) case manifests.MetricsForwarderRoute("").Name: p.Backends = append(p.Backends, backendDesc{Name: "metrics_forwarder", HostName: route.Spec.Host, DestinationServiceIP: svcsNameToIP[route.Spec.To.Name], DestinationPort: route.Spec.Port.TargetPort.IntVal}) } @@ -166,6 +165,7 @@ func ReconcileRouterDeployment(deployment *appsv1.Deployment, ownerRef config.Ow }, }, }, + ServiceAccountName: "", AutomountServiceAccountToken: pointer.Bool(false), }, }, @@ -300,3 +300,13 @@ func ReconcileRouteStatus(route *routev1.Route, externalHostname, internalHostna } route.Status.Ingress = []routev1.RouteIngress{ingress} } + +func ReconcileRouterPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget, availability hyperv1.AvailabilityPolicy, ownerRef config.OwnerRef) { + if pdb.CreationTimestamp.IsZero() { + pdb.Spec.Selector = &metav1.LabelSelector{ + MatchLabels: hcpRouterLabels(), + } + } + ownerRef.ApplyTo(pdb) + util.ReconcilePodDisruptionBudget(pdb, availability) +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/ingress/router_test.go b/control-plane-operator/controllers/hostedcontrolplane/ingress/router_test.go index 4d8fc67063..b5a04d20ca 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ingress/router_test.go +++ b/control-plane-operator/controllers/hostedcontrolplane/ingress/router_test.go @@ -53,13 +53,12 @@ func TestGenerateRouterConfig(t *testing.T) { oauthInternal := namedRoute(manifests.OauthServerInternalRoute(testNS), withHost("oauth-internal.example.com"), withSvc("openshift-oauth")) oauthExternalPrivate := namedRoute(manifests.OauthServerExternalPrivateRoute(testNS), withHost("oauth-private.example.com"), withSvc("openshift-oauth")) oauthExternalPublic := namedRoute(manifests.OauthServerExternalPublicRoute(testNS), 
withHost("oauth-public.example.com"), withSvc("openshift-oauth")) - ovnKube := route(manifests.OVNKubeSBDBRoute("").Name, withHost("ovnkube-sbdb.example.com"), withSvc("ovnkube-master-external"), withPort(3000)) metricsForwarder := route(manifests.MetricsForwarderRoute("").Name, withHost("metrics-forwarder.example.com"), withSvc("metrics-forwarder"), withPort(4000)) kasPublic := namedRoute(manifests.KubeAPIServerExternalPublicRoute(testNS), withHost("kube-apiserver-public.example.com"), withSvc("kube-apiserver")) kasPrivate := namedRoute(manifests.KubeAPIServerExternalPrivateRoute(testNS), withSvc("kube-apiserver-private.example.com"), withSvc("kube-apiserver")) routeList := &routev1.RouteList{ - Items: []routev1.Route{*ignition, *konnectivity, *oauthInternal, *oauthExternalPrivate, *oauthExternalPublic, *ovnKube, *metricsForwarder, *kasPublic, *kasPrivate}, + Items: []routev1.Route{*ignition, *konnectivity, *oauthInternal, *oauthExternalPrivate, *oauthExternalPublic, *metricsForwarder, *kasPublic, *kasPrivate}, } svcsNameToIP := make(map[string]string) diff --git a/control-plane-operator/controllers/hostedcontrolplane/ingress/testdata/zz_fixture_TestGenerateRouterConfig.yaml b/control-plane-operator/controllers/hostedcontrolplane/ingress/testdata/zz_fixture_TestGenerateRouterConfig.yaml index 555ed90ad5..57dcf5fc29 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ingress/testdata/zz_fixture_TestGenerateRouterConfig.yaml +++ b/control-plane-operator/controllers/hostedcontrolplane/ingress/testdata/zz_fixture_TestGenerateRouterConfig.yaml @@ -25,14 +25,12 @@ frontend main acl is_oauth req_ssl_sni -i oauth-public.example.com acl is_oauth_internal req_ssl_sni -i oauth-internal.example.com acl is_oauth_private req_ssl_sni -i oauth-private.example.com - acl is_ovnkube_sbdb req_ssl_sni -i ovnkube-sbdb.example.com use_backend ignition if is_ignition use_backend konnectivity if is_konnectivity use_backend metrics_forwarder if is_metrics_forwarder 
use_backend oauth if is_oauth use_backend oauth_internal if is_oauth_internal use_backend oauth_private if is_oauth_private - use_backend ovnkube_sbdb if is_ovnkube_sbdb default_backend kube_api listen health_check_http_url @@ -47,7 +45,7 @@ backend konnectivity server konnectivity 0.0.0.1:8091 backend metrics_forwarder - server metrics_forwarder 0.0.0.6:4000 + server metrics_forwarder 0.0.0.5:4000 backend oauth server oauth 0.0.0.4:6443 @@ -58,8 +56,5 @@ backend oauth_internal backend oauth_private server oauth_private 0.0.0.4:6443 -backend ovnkube_sbdb - server ovnkube_sbdb 0.0.0.5:3000 - backend kube_api - server kube_api 0.0.0.8:6443 + server kube_api 0.0.0.7:6443 diff --git a/control-plane-operator/controllers/hostedcontrolplane/ingressoperator/ingressoperator.go b/control-plane-operator/controllers/hostedcontrolplane/ingressoperator/ingressoperator.go index 0211ece811..e818ed1e81 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ingressoperator/ingressoperator.go +++ b/control-plane-operator/controllers/hostedcontrolplane/ingressoperator/ingressoperator.go @@ -3,6 +3,7 @@ package ingressoperator import ( "fmt" + configv1 "github.com/openshift/api/config/v1" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/imageprovider" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" @@ -17,16 +18,16 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/intstr" - utilpointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( - operatorName = "ingress-operator" - ingressOperatorContainerName = "ingress-operator" - metricsHostname = "ingress-operator" - socks5ProxyContainerName = "socks-proxy" - ingressOperatorMetricsPort = 60000 + operatorName = "ingress-operator" + ingressOperatorContainerName = 
"ingress-operator" + metricsHostname = "ingress-operator" + konnectivityProxyContainerName = "konnectivity-proxy" + ingressOperatorMetricsPort = 60000 + konnectivityProxyPort = 8090 ) type Params struct { @@ -37,9 +38,11 @@ type Params struct { ReleaseVersion string TokenMinterImage string AvailabilityProberImage string - Socks5ProxyImage string + ProxyImage string Platform hyperv1.PlatformType DeploymentConfig config.DeploymentConfig + ProxyConfig *configv1.ProxySpec + NoProxy string } func NewParams(hcp *hyperv1.HostedControlPlane, version string, releaseImageProvider *imageprovider.ReleaseImageProvider, userReleaseImageProvider *imageprovider.ReleaseImageProvider, setDefaultSecurityContext bool, platform hyperv1.PlatformType) Params { @@ -49,55 +52,40 @@ func NewParams(hcp *hyperv1.HostedControlPlane, version string, releaseImageProv HAProxyRouterImage: userReleaseImageProvider.GetImage("haproxy-router"), ReleaseVersion: version, TokenMinterImage: releaseImageProvider.GetImage("token-minter"), - Socks5ProxyImage: releaseImageProvider.GetImage("socks5-proxy"), + ProxyImage: releaseImageProvider.GetImage(util.CPOImageName), AvailabilityProberImage: releaseImageProvider.GetImage(util.AvailabilityProberImageName), Platform: platform, } + if hcp.Spec.Configuration != nil { + p.ProxyConfig = hcp.Spec.Configuration.Proxy + p.NoProxy = proxy.DefaultNoProxy(hcp) + } p.DeploymentConfig.Scheduling.PriorityClass = config.DefaultPriorityClass if hcp.Annotations[hyperv1.ControlPlanePriorityClass] != "" { p.DeploymentConfig.Scheduling.PriorityClass = hcp.Annotations[hyperv1.ControlPlanePriorityClass] } p.DeploymentConfig.SetRestartAnnotation(hcp.ObjectMeta) - p.DeploymentConfig.SetDefaults(hcp, nil, utilpointer.Int(1)) + p.DeploymentConfig.SetDefaults(hcp, nil, ptr.To(1)) p.DeploymentConfig.SetDefaultSecurityContext = setDefaultSecurityContext - p.DeploymentConfig.ReadinessProbes = config.ReadinessProbes{ - ingressOperatorContainerName: { - ProbeHandler: corev1.ProbeHandler{ - 
HTTPGet: &corev1.HTTPGetAction{ - Path: "/metrics", - Port: intstr.FromInt(ingressOperatorMetricsPort), - Scheme: corev1.URISchemeHTTP, - }, - }, - InitialDelaySeconds: 15, - PeriodSeconds: 60, - SuccessThreshold: 1, - FailureThreshold: 3, - TimeoutSeconds: 5, + return p +} + +func ReconcileDeployment(dep *appsv1.Deployment, params Params, platformType hyperv1.PlatformType) { + ingressOpResources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("80Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), }, } - p.DeploymentConfig.LivenessProbes = config.LivenessProbes{ - ingressOperatorContainerName: { - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/metrics", - Port: intstr.FromInt(ingressOperatorMetricsPort), - Scheme: corev1.URISchemeHTTP, - }, - }, - InitialDelaySeconds: 60, - PeriodSeconds: 60, - SuccessThreshold: 1, - FailureThreshold: 5, - TimeoutSeconds: 5, - }, + // preserve existing resource requirements + mainContainer := util.FindContainer(ingressOperatorContainerName, dep.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + ingressOpResources = mainContainer.Resources + } } - return p -} - -func ReconcileDeployment(dep *appsv1.Deployment, params Params) { - dep.Spec.Replicas = utilpointer.Int32(1) + dep.Spec.Replicas = ptr.To[int32](1) dep.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": operatorName}} dep.Spec.Strategy.Type = appsv1.RecreateDeploymentStrategyType if dep.Spec.Template.Annotations == nil { @@ -113,7 +101,7 @@ func ReconcileDeployment(dep *appsv1.Deployment, params Params) { hyperv1.ControlPlaneComponent: operatorName, } - dep.Spec.Template.Spec.AutomountServiceAccountToken = utilpointer.Bool(false) + dep.Spec.Template.Spec.AutomountServiceAccountToken = ptr.To(false) dep.Spec.Template.Spec.Containers = []corev1.Container{{ 
Command: []string{ "ingress-operator", @@ -136,35 +124,32 @@ func ReconcileDeployment(dep *appsv1.Deployment, params Params) { {Name: "KUBECONFIG", Value: "/etc/kubernetes/kubeconfig"}, { Name: "HTTP_PROXY", - Value: fmt.Sprintf("socks5://127.0.0.1:%d", kas.KonnectivityServerLocalPort), + Value: fmt.Sprintf("http://127.0.0.1:%d", konnectivityProxyPort), }, { Name: "HTTPS_PROXY", - Value: fmt.Sprintf("socks5://127.0.0.1:%d", kas.KonnectivityServerLocalPort), + Value: fmt.Sprintf("http://127.0.0.1:%d", konnectivityProxyPort), }, { Name: "NO_PROXY", Value: manifests.KubeAPIServerService("").Name, }, }, - Name: ingressOperatorContainerName, - Image: params.IngressOperatorImage, - ImagePullPolicy: corev1.PullIfNotPresent, - Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("10m"), - corev1.ResourceMemory: resource.MustParse("80Mi"), - }}, + Name: ingressOperatorContainerName, + Image: params.IngressOperatorImage, + ImagePullPolicy: corev1.PullIfNotPresent, + Resources: ingressOpResources, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, VolumeMounts: []corev1.VolumeMount{ {Name: "ingress-operator-kubeconfig", MountPath: "/etc/kubernetes"}, }, }} - dep.Spec.Template.Spec.Containers = append(dep.Spec.Template.Spec.Containers, ingressOperatorSocks5ProxyContainer(params.Socks5ProxyImage)) + dep.Spec.Template.Spec.Containers = append(dep.Spec.Template.Spec.Containers, ingressOperatorKonnectivityProxyContainer(params.ProxyImage, params.ProxyConfig, params.NoProxy)) dep.Spec.Template.Spec.Volumes = []corev1.Volume{ - {Name: "ingress-operator-kubeconfig", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: manifests.IngressOperatorKubeconfig("").Name, DefaultMode: utilpointer.Int32(0640)}}}, - {Name: "admin-kubeconfig", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "service-network-admin-kubeconfig", DefaultMode: 
utilpointer.Int32(0640)}}}, - {Name: "konnectivity-proxy-cert", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: manifests.KonnectivityClientSecret("").Name, DefaultMode: utilpointer.Int32(0640)}}}, - {Name: "konnectivity-proxy-ca", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: manifests.KonnectivityCAConfigMap("").Name}, DefaultMode: utilpointer.Int32(0640)}}}, + {Name: "ingress-operator-kubeconfig", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: manifests.IngressOperatorKubeconfig("").Name, DefaultMode: ptr.To[int32](0640)}}}, + {Name: "admin-kubeconfig", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "service-network-admin-kubeconfig", DefaultMode: ptr.To[int32](0640)}}}, + {Name: "konnectivity-proxy-cert", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: manifests.KonnectivityClientSecret("").Name, DefaultMode: ptr.To[int32](0640)}}}, + {Name: "konnectivity-proxy-ca", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: manifests.KonnectivityCAConfigMap("").Name}, DefaultMode: ptr.To[int32](0640)}}}, } if params.Platform == hyperv1.AWSPlatform { @@ -203,7 +188,7 @@ func ReconcileDeployment(dep *appsv1.Deployment, params Params) { } util.AvailabilityProber( - kas.InClusterKASReadyURL(), + kas.InClusterKASReadyURL(platformType), params.AvailabilityProberImage, &dep.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { @@ -217,16 +202,14 @@ func ReconcileDeployment(dep *appsv1.Deployment, params Params) { params.DeploymentConfig.ApplyTo(dep) } -func ingressOperatorSocks5ProxyContainer(socks5ProxyImage string) corev1.Container { +func ingressOperatorKonnectivityProxyContainer(proxyImage string, proxyConfig *configv1.ProxySpec, noProxy string) corev1.Container { c := 
corev1.Container{ - Name: socks5ProxyContainerName, - Image: socks5ProxyImage, - Command: []string{"/usr/bin/control-plane-operator", "konnectivity-socks5-proxy", "--resolve-from-guest-cluster-dns=true"}, + Name: konnectivityProxyContainerName, + Image: proxyImage, + Command: []string{"/usr/bin/control-plane-operator", "konnectivity-https-proxy"}, Args: []string{ "run", - // Do not route cloud provider traffic through konnektivity and thus nodes to speed - // up cluster creation. Requires proxy env vars to be set. - "--connect-directly-to-cloud-apis=true", + "--connect-directly-to-cloud-apis", }, Env: []corev1.EnvVar{{ Name: "KUBECONFIG", @@ -244,6 +227,11 @@ func ingressOperatorSocks5ProxyContainer(socks5ProxyImage string) corev1.Contain {Name: "konnectivity-proxy-ca", MountPath: "/etc/konnectivity/proxy-ca"}, }, } + if proxyConfig != nil { + c.Args = append(c.Args, "--http-proxy", proxyConfig.HTTPProxy) + c.Args = append(c.Args, "--https-proxy", proxyConfig.HTTPSProxy) + c.Args = append(c.Args, "--no-proxy", noProxy) + } proxy.SetEnvVars(&c.Env) return c } diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/auditcfg.go b/control-plane-operator/controllers/hostedcontrolplane/kas/auditcfg.go index 8c4f0ab585..1fc1c006fc 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/auditcfg.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/auditcfg.go @@ -12,7 +12,8 @@ import ( ) const ( - AuditPolicyConfigMapKey = "policy.yaml" + AuditPolicyConfigMapKey = "policy.yaml" + AuditPolicyProfileMapKey = "profile" ) func ReconcileAuditConfig(auditCfgMap *corev1.ConfigMap, ownerRef config.OwnerRef, auditConfig configv1.Audit) error { @@ -29,5 +30,6 @@ func ReconcileAuditConfig(auditCfgMap *corev1.ConfigMap, ownerRef config.OwnerRe return err } auditCfgMap.Data[AuditPolicyConfigMapKey] = string(policyBytes) + auditCfgMap.Data[AuditPolicyProfileMapKey] = string(auditConfig.Profile) return nil } diff --git 
a/control-plane-operator/controllers/hostedcontrolplane/kas/auth.go b/control-plane-operator/controllers/hostedcontrolplane/kas/auth.go new file mode 100644 index 0000000000..ab247fd88e --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/auth.go @@ -0,0 +1,97 @@ +package kas + +import ( + "context" + "encoding/json" + "fmt" + + configv1 "github.com/openshift/api/config/v1" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + hcpconfig "github.com/openshift/hypershift/support/config" + + crclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + AuthConfigMapKey = "auth.json" +) + +func ReconcileAuthConfig(ctx context.Context, c crclient.Client, config *corev1.ConfigMap, ownerRef hcpconfig.OwnerRef, p KubeAPIServerConfigParams) error { + ownerRef.ApplyTo(config) + if config.Data == nil { + config.Data = map[string]string{} + } + authConfig, err := generateAuthConfig(p.Authentication, ctx, c, config.Namespace) + if err != nil { + return fmt.Errorf("failed to generate authentication config: %w", err) + } + serializedConfig, err := json.Marshal(authConfig) + if err != nil { + return fmt.Errorf("failed to serialize kube apiserver authentication config: %w", err) + } + config.Data[AuthenticationConfigKey] = string(serializedConfig) + return nil +} + +func generateAuthConfig(spec *configv1.AuthenticationSpec, ctx context.Context, c crclient.Client, namespace string) (*AuthenticationConfiguration, error) { + config := &AuthenticationConfiguration{ + TypeMeta: metav1.TypeMeta{ + Kind: "AuthenticationConfiguration", + APIVersion: "apiserver.config.k8s.io/v1alpha1", + }, + JWT: []JWTAuthenticator{}, + } + if spec == nil { + return config, nil + } + for _, provider := range spec.OIDCProviders { + caData := "" + if provider.Issuer.CertificateAuthority.Name != "" { + ca := &corev1.ConfigMap{} + if err := c.Get(ctx, crclient.ObjectKey{Name: provider.Issuer.CertificateAuthority.Name, Namespace: namespace}, ca); err 
!= nil { + return nil, fmt.Errorf("failed to get issuer certificate authority configmap: %w", err) + } + var ok bool + caData, ok = ca.Data["ca-bundle.crt"] + if !ok { + return nil, fmt.Errorf("issuer certificate authority configmap does not contain key ca-bundle.crt") + } + } + jwt := JWTAuthenticator{ + Issuer: Issuer{ + URL: provider.Issuer.URL, + CertificateAuthority: caData, + }, + } + audience := []string{} + for _, a := range provider.Issuer.Audiences { + audience = append(audience, string(a)) + } + jwt.Issuer.Audiences = audience + jwt.Issuer.AudienceMatchPolicy = AudienceMatchPolicyMatchAny + jwt.ClaimMappings.Username.Claim = provider.ClaimMappings.Username.Claim + if provider.ClaimMappings.Username.PrefixPolicy == configv1.Prefix { + jwt.ClaimMappings.Username.Prefix = &provider.ClaimMappings.Username.Prefix.PrefixString + } else { + noPrefix := "" + jwt.ClaimMappings.Username.Prefix = &noPrefix + } + jwt.ClaimMappings.Groups.Claim = provider.ClaimMappings.Groups.Claim + jwt.ClaimMappings.Groups.Prefix = &provider.ClaimMappings.Groups.Prefix + for _, rule := range provider.ClaimValidationRules { + switch rule.Type { + case configv1.TokenValidationRuleTypeRequiredClaim: + jwtRule := ClaimValidationRule{ + Claim: rule.RequiredClaim.Claim, + RequiredValue: rule.RequiredClaim.RequiredValue, + } + jwt.ClaimValidationRules = append(jwt.ClaimValidationRules, jwtRule) + } + } + config.JWT = append(config.JWT, jwt) + } + return config, nil +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/auth_types.go b/control-plane-operator/controllers/hostedcontrolplane/kas/auth_types.go new file mode 100644 index 0000000000..3760033810 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/auth_types.go @@ -0,0 +1,142 @@ +package kas + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// TODO +// We are currently copying this type from k8s.io/apiserver because it is +// not yet in the kube base we use for 4.16. 
+// In 4.17, we should switch back to using the type in k8s.io/apiserver +// and remove this copy +// https://github.com/openshift/kubernetes/pull/1881 + +// AuthenticationConfiguration provides versioned configuration for authentication. +type AuthenticationConfiguration struct { + metav1.TypeMeta + + // jwt is a list of authenticator to authenticate Kubernetes users using + // JWT compliant tokens. The authenticator will attempt to parse a raw ID token, + // verify it's been signed by the configured issuer. The public key to verify the + // signature is discovered from the issuer's public endpoint using OIDC discovery. + // For an incoming token, each JWT authenticator will be attempted in + // the order in which it is specified in this list. Note however that + // other authenticators may run before or after the JWT authenticators. + // The specific position of JWT authenticators in relation to other + // authenticators is neither defined nor stable across releases. Since + // each JWT authenticator must have a unique issuer URL, at most one + // JWT authenticator will attempt to cryptographically validate the token. + JWT []JWTAuthenticator `json:"jwt"` +} + +// JWTAuthenticator provides the configuration for a single JWT authenticator. +type JWTAuthenticator struct { + // issuer contains the basic OIDC provider connection options. + // +required + Issuer Issuer `json:"issuer"` + + // claimValidationRules are rules that are applied to validate token claims to authenticate users. + // +optional + ClaimValidationRules []ClaimValidationRule `json:"claimValidationRules,omitempty"` + + // claimMappings points claims of a token to be treated as user attributes. + // +required + ClaimMappings ClaimMappings `json:"claimMappings"` +} + +// Issuer provides the configuration for a external provider specific settings. +type Issuer struct { + // url points to the issuer URL in a format https://url or https://url/path. 
+ // This must match the "iss" claim in the presented JWT, and the issuer returned from discovery. + // Same value as the --oidc-issuer-url flag. + // Used to fetch discovery information unless overridden by discoveryURL. + // Required to be unique. + // Note that egress selection configuration is not used for this network connection. + // +required + URL string `json:"url"` + + // certificateAuthority contains PEM-encoded certificate authority certificates + // used to validate the connection when fetching discovery information. + // If unset, the system verifier is used. + // Same value as the content of the file referenced by the --oidc-ca-file flag. + // +optional + CertificateAuthority string `json:"certificateAuthority,omitempty"` + + // audiences is the set of acceptable audiences the JWT must be issued to. + // At least one of the entries must match the "aud" claim in presented JWTs. + // Same value as the --oidc-client-id flag (though this field supports an array). + // Required to be non-empty. + // +required + Audiences []string `json:"audiences"` + + // audienceMatchPolicy defines how the "audiences" field is used to match the "aud" claim in the presented JWT. + // Allowed values are: + // 1. "MatchAny" when multiple audiences are specified and + // 2. empty (or unset) or "MatchAny" when a single audience is specified. + // + // - MatchAny: the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + // For example, if "audiences" is ["foo", "bar"], the "aud" claim in the presented JWT must contain either "foo" or "bar" (and may contain both). + // + // - "": The match policy can be empty (or unset) when a single audience is specified in the "audiences" field. The "aud" claim in the presented JWT must contain the single audience (and may contain others). + // + // For more nuanced audience validation, use claimValidationRules. 
+ // example: claimValidationRule[].expression: 'sets.equivalent(claims.aud, ["bar", "foo", "baz"])' to require an exact match. + // +optional + AudienceMatchPolicy AudienceMatchPolicyType `json:"audienceMatchPolicy,omitempty"` +} + +// AudienceMatchPolicyType is a set of valid values for Issuer.AudienceMatchPolicy +type AudienceMatchPolicyType string + +// Valid types for AudienceMatchPolicyType +const ( + // MatchAny means the "aud" claim in the presented JWT must match at least one of the entries in the "audiences" field. + AudienceMatchPolicyMatchAny AudienceMatchPolicyType = "MatchAny" +) + +// ClaimValidationRule provides the configuration for a single claim validation rule. +type ClaimValidationRule struct { + // claim is the name of a required claim. + // Same as --oidc-required-claim flag. + // Only string claim keys are supported. + // +required + Claim string `json:"claim"` + // requiredValue is the value of a required claim. + // Same as --oidc-required-claim flag. + // Only string claim values are supported. + // If claim is set and requiredValue is not set, the claim must be present with a value set to the empty string. + // +optional + RequiredValue string `json:"requiredValue"` +} + +// ClaimMappings provides the configuration for claim mapping +type ClaimMappings struct { + // username represents an option for the username attribute. + // The claim's value must be a singular string. + // Same as the --oidc-username-claim and --oidc-username-prefix flags. + // + // In the flag based approach, the --oidc-username-claim and --oidc-username-prefix are optional. If --oidc-username-claim is not set, + // the default value is "sub". For the authentication config, there is no defaulting for claim or prefix. The claim and prefix must be set explicitly. + // For claim, if --oidc-username-claim was not set with legacy flag approach, configure username.claim="sub" in the authentication config. 
+ // For prefix: + // (1) --oidc-username-prefix="-", no prefix was added to the username. For the same behavior using authentication config, + // set username.prefix="" + // (2) --oidc-username-prefix="" and --oidc-username-claim != "email", prefix was "#". For the same + // behavior using authentication config, set username.prefix="#" + // (3) --oidc-username-prefix="". For the same behavior using authentication config, set username.prefix="" + // +required + Username PrefixedClaimOrExpression `json:"username"` + // groups represents an option for the groups attribute. + // The claim's value must be a string or string array claim. + // // If groups.claim is set, the prefix must be specified (and can be the empty string). + // +optional + Groups PrefixedClaimOrExpression `json:"groups,omitempty"` +} + +// PrefixedClaimOrExpression provides the configuration for a single prefixed claim or expression. +type PrefixedClaimOrExpression struct { + // claim is the JWT claim to use. + // +optional + Claim string `json:"claim"` + // prefix is prepended to claim's value to prevent clashes with existing names. + // +required + Prefix *string `json:"prefix"` +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/config.go b/control-plane-operator/controllers/hostedcontrolplane/kas/config.go index aead622323..a06a18276b 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/config.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/config.go @@ -29,6 +29,7 @@ import ( const ( KubeAPIServerConfigKey = "config.json" + AuthenticationConfigKey = "auth.json" OauthMetadataConfigKey = "oauthMetadata.json" AuditLogFile = "audit.log" EgressSelectorConfigKey = "config.yaml" @@ -93,7 +94,7 @@ func generateConfig(p KubeAPIServerConfigParams, version semver.Version) *kcpv1. 
Kind: "PodSecurityConfiguration", }, Defaults: podsecurityadmissionv1beta1.PodSecurityDefaults{ - Enforce: "restricted", + Enforce: "privileged", EnforceVersion: "latest", Audit: "restricted", AuditVersion: "latest", @@ -125,23 +126,25 @@ func generateConfig(p KubeAPIServerConfigParams, version semver.Version) *kcpv1. }, CORSAllowedOrigins: corsAllowedOrigins(p.AdditionalCORSAllowedOrigins), }, - AuthConfig: kcpv1.MasterAuthConfig{ - OAuthMetadataFile: cpath(kasVolumeOauthMetadata().Name, OauthMetadataConfigKey), - }, ConsolePublicURL: p.ConsolePublicURL, ImagePolicyConfig: imagePolicyConfig(p.InternalRegistryHostName, p.ExternalRegistryHostNames), ProjectConfig: projectConfig(p.DefaultNodeSelector), ServiceAccountPublicKeyFiles: []string{cpath(kasVolumeServiceAccountKey().Name, pki.ServiceSignerPublicKey)}, ServicesSubnet: strings.Join(p.ServiceNetwork, ","), } + + if p.Authentication == nil || p.Authentication.Type == configv1.AuthenticationTypeIntegratedOAuth { + config.AuthConfig.OAuthMetadataFile = cpath(kasVolumeOauthMetadata().Name, OauthMetadataConfigKey) + } + args := kubeAPIServerArgs{} args.Set("advertise-address", p.AdvertiseAddress) args.Set("allow-privileged", "true") args.Set("anonymous-auth", "true") args.Set("api-audiences", p.ServiceAccountIssuerURL) args.Set("audit-log-format", "json") - args.Set("audit-log-maxbackup", "10") - args.Set("audit-log-maxsize", "100") + args.Set("audit-log-maxbackup", "1") + args.Set("audit-log-maxsize", "10") args.Set("audit-log-path", cpath(kasVolumeWorkLogs().Name, AuditLogFile)) args.Set("audit-policy-file", cpath(kasVolumeAuditConfig().Name, AuditPolicyConfigMapKey)) args.Set("authorization-mode", "Scope", "SystemMasters", "RBAC", "Node") @@ -171,36 +174,23 @@ func generateConfig(p KubeAPIServerConfigParams, version semver.Version) *kcpv1. 
args.Set("authentication-token-webhook-config-file", cpath(kasVolumeAuthTokenWebhookConfig().Name, KubeconfigKey)) args.Set("authentication-token-webhook-version", "v1") } else { - if len(p.Authentication.OIDCProviders) > 0 { - provider := p.Authentication.OIDCProviders[0] - args.Set("oidc-issuer-url", provider.Issuer.URL) - args.Set("oidc-client-id", string(provider.Issuer.Audiences[0])) - args.Set("oidc-username-claim", provider.ClaimMappings.Username.Claim) - if provider.ClaimMappings.Username.PrefixPolicy == configv1.Prefix && - provider.ClaimMappings.Username.Prefix != nil { - args.Set("oidc-username-prefix", provider.ClaimMappings.Username.Prefix.PrefixString) - } - args.Set("oidc-groups-claim", provider.ClaimMappings.Groups.Claim) - args.Set("oidc-groups-prefix", provider.ClaimMappings.Groups.Prefix) - for _, cvr := range provider.ClaimValidationRules { - // TODO: currently can only support a single required claim because of how kubeAPIServerArgs dedups config fields - // In order to specify multiple required claims, the flag must be used multiple times - // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#configuring-the-api-server - args.Set("oidc-required-claim", fmt.Sprintf("%s=%s", cvr.RequiredClaim.Claim, cvr.RequiredClaim.RequiredValue)) - } - args.Set("oidc-ca-file", oidcCAFile(provider.Issuer.CertificateAuthority.Name)) + if p.Authentication != nil && len(p.Authentication.OIDCProviders) > 0 { + args.Set("authentication-config", cpath(kasVolumeAuthConfig().Name, AuthenticationConfigKey)) } } args.Set("enable-aggregator-routing", "true") args.Set("enable-logs-handler", "false") - args.Set("endpoint-reconciler-type", "lease") + args.Set("endpoint-reconciler-type", "none") args.Set("etcd-cafile", cpath(kasVolumeEtcdCA().Name, certs.CASignerCertMapKey)) args.Set("etcd-certfile", cpath(kasVolumeEtcdClientCert().Name, pki.EtcdClientCrtKey)) args.Set("etcd-keyfile", cpath(kasVolumeEtcdClientCert().Name, pki.EtcdClientKeyKey)) 
args.Set("etcd-prefix", "kubernetes.io") args.Set("etcd-servers", p.EtcdURL) args.Set("event-ttl", "3h") - args.Set("feature-gates", p.FeatureGates...) + // TODO remove in 4.16 once we're able to have different featuregates for hypershift + featureGates := append([]string{}, p.FeatureGates...) + featureGates = append(featureGates, "StructuredAuthenticationConfiguration=true") + args.Set("feature-gates", featureGates...) args.Set("goaway-chance", "0") args.Set("http2-max-streams-per-connection", "2000") args.Set("kubelet-certificate-authority", cpath(kasVolumeKubeletClientCA().Name, certs.CASignerCertMapKey)) @@ -209,8 +199,8 @@ func generateConfig(p KubeAPIServerConfigParams, version semver.Version) *kcpv1. args.Set("kubelet-preferred-address-types", "InternalIP") args.Set("kubelet-read-only-port", "0") args.Set("kubernetes-service-node-port", "0") - args.Set("max-mutating-requests-inflight", "1000") - args.Set("max-requests-inflight", "3000") + args.Set("max-mutating-requests-inflight", p.MaxMutatingRequestsInflight) + args.Set("max-requests-inflight", p.MaxRequestsInflight) args.Set("min-request-timeout", "3600") args.Set("proxy-client-cert-file", cpath(kasVolumeAggregatorCert().Name, corev1.TLSCertKey)) args.Set("proxy-client-key-file", cpath(kasVolumeAggregatorCert().Name, corev1.TLSPrivateKeyKey)) @@ -219,7 +209,17 @@ func generateConfig(p KubeAPIServerConfigParams, version semver.Version) *kcpv1. 
args.Set("requestheader-extra-headers-prefix", "X-Remote-Extra-") args.Set("requestheader-group-headers", "X-Remote-Group") args.Set("requestheader-username-headers", "X-Remote-User") - args.Set("runtime-config", "flowcontrol.apiserver.k8s.io/v1alpha1=true") + runtimeConfig := []string{} + runtimeConfig = append(runtimeConfig, "flowcontrol.apiserver.k8s.io/v1alpha1=true") + for _, gate := range p.FeatureGates { + if gate == "ValidatingAdmissionPolicy=true" { + runtimeConfig = append(runtimeConfig, "admissionregistration.k8s.io/v1beta1=true") + } + if gate == "DynamicResourceAllocation=true" { + runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha2=true") + } + } + args.Set("runtime-config", runtimeConfig...) args.Set("service-account-issuer", p.ServiceAccountIssuerURL) args.Set("service-account-jwks-uri", jwksURL(p.ServiceAccountIssuerURL)) args.Set("service-account-lookup", "true") @@ -244,14 +244,6 @@ func cloudProviderConfig(cloudProviderConfigName, cloudProvider string) string { return "" } -func oidcCAFile(oidcCAName string) string { - if oidcCAName != "" { - caDir := oidcCAVolumeMount.Path(kasContainerMain().Name, kasVolumeOIDCCA().Name) - return path.Join(caDir, "ca.crt") - } - return "" -} - func externalIPRangerConfig(externalIPConfig *configv1.ExternalIPConfig) runtime.Object { cfg := &unstructured.Unstructured{} cfg.SetAPIVersion("network.openshift.io/v1") @@ -303,6 +295,7 @@ func admissionPlugins() []string { "ServiceAccount", "StorageObjectInUseProtection", "TaintNodesByCondition", + "ValidatingAdmissionPolicy", "ValidatingAdmissionWebhook", "authorization.openshift.io/RestrictSubjectBindings", "authorization.openshift.io/ValidateRoleBindingRestriction", diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/kas/deployment.go index 3a87569f3e..b34ea7aa8b 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/deployment.go +++ 
b/control-plane-operator/controllers/hostedcontrolplane/kas/deployment.go @@ -5,6 +5,7 @@ import ( "fmt" "path" "strconv" + "strings" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" @@ -28,6 +29,7 @@ import ( const ( kasNamedCertificateMountPathPrefix = "/etc/kubernetes/certs/named" + authConfigHashAnnotation = "kube-apiserver.hypershift.openshift.io/auth-config-hash" auditConfigHashAnnotation = "kube-apiserver.hypershift.openshift.io/audit-config-hash" configHashAnnotation = "kube-apiserver.hypershift.openshift.io/config-hash" awsPodIdentityWebhookServingCertVolumeName = "aws-pod-identity-webhook-serving-certs" @@ -45,6 +47,7 @@ var ( }, kasContainerMain().Name: { kasVolumeWorkLogs().Name: "/var/log/kube-apiserver", + kasVolumeAuthConfig().Name: "/etc/kubernetes/auth", kasVolumeConfig().Name: "/etc/kubernetes/config", kasVolumeAuditConfig().Name: "/etc/kubernetes/audit", kasVolumeKonnectivityCA().Name: "/etc/kubernetes/certs/konnectivity-ca", @@ -69,12 +72,6 @@ var ( }, } - oidcCAVolumeMount = util.PodVolumeMounts{ - kasContainerMain().Name: { - kasVolumeOIDCCA().Name: "/etc/kubernetes/certs/oidc-ca", - }, - } - cloudProviderConfigVolumeMount = util.PodVolumeMounts{ kasContainerMain().Name: { kasVolumeCloudConfig().Name: "/etc/kubernetes/cloud", @@ -112,6 +109,7 @@ func ReconcileKubeAPIServerDeployment(deployment *appsv1.Deployment, images KubeAPIServerImages, config *corev1.ConfigMap, auditConfig *corev1.ConfigMap, + authConfig *corev1.ConfigMap, auditWebhookRef *corev1.LocalObjectReference, aesCBCActiveKey []byte, aesCBCBackupKey []byte, @@ -119,6 +117,7 @@ func ReconcileKubeAPIServerDeployment(deployment *appsv1.Deployment, payloadVersion string, featureGateSpec *configv1.FeatureGateSpec, oidcCA *corev1.LocalObjectReference, + cipherSuites []string, ) error { secretEncryptionData := hcp.Spec.SecretEncryption @@ -139,11 +138,23 @@ func ReconcileKubeAPIServerDeployment(deployment *appsv1.Deployment, } auditConfigHash := 
util.ComputeHash(auditConfigBytes) + authConfigBytes, ok := authConfig.Data[AuthConfigMapKey] + if !ok { + return fmt.Errorf("kube apiserver authentication configuration is not expected to be empty") + } + authConfigHash := util.ComputeHash(authConfigBytes) + // preserve existing resource requirements for main KAS container - mainContainer := util.FindContainer(kasContainerMain().Name, deployment.Spec.Template.Spec.Containers) - if mainContainer != nil { - deploymentConfig.SetContainerResourcesIfPresent(mainContainer) + kasContainer := util.FindContainer(kasContainerMain().Name, deployment.Spec.Template.Spec.Containers) + if kasContainer != nil { + deploymentConfig.SetContainerResourcesIfPresent(kasContainer) } + // preserve existing resource requirements for the konnectivy-server container + konnectivityContainer := util.FindContainer(konnectivityServerContainer().Name, deployment.Spec.Template.Spec.Containers) + if konnectivityContainer != nil { + deploymentConfig.SetContainerResourcesIfPresent(konnectivityContainer) + } + if deployment.Spec.Selector == nil { deployment.Spec.Selector = &metav1.LabelSelector{ MatchLabels: kasLabels(), @@ -174,6 +185,7 @@ func ReconcileKubeAPIServerDeployment(deployment *appsv1.Deployment, Annotations: map[string]string{ configHashAnnotation: configHash, auditConfigHashAnnotation: auditConfigHash, + authConfigHashAnnotation: authConfigHash, }, }, Spec: corev1.PodSpec{ @@ -191,35 +203,15 @@ func ReconcileKubeAPIServerDeployment(deployment *appsv1.Deployment, }, Containers: []corev1.Container{ util.BuildContainer(kasContainerApplyBootstrap(), buildKASContainerApplyBootstrap(images.CLI)), - util.BuildContainer(kasContainerMain(), buildKASContainerMain(images.HyperKube, port, additionalNoProxyCIDRS)), - util.BuildContainer(konnectivityServerContainer(), buildKonnectivityServerContainer(images.KonnectivityServer, deploymentConfig.Replicas)), - { - Name: "audit-logs", - Image: images.CLI, - ImagePullPolicy: corev1.PullIfNotPresent, - 
Command: []string{ - "/usr/bin/tail", - "-c+1", - "-F", - fmt.Sprintf("%s/%s", volumeMounts.Path(kasContainerMain().Name, kasVolumeWorkLogs().Name), "audit.log"), - }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("5m"), - corev1.ResourceMemory: resource.MustParse("10Mi"), - }, - }, - VolumeMounts: []corev1.VolumeMount{{ - Name: kasVolumeWorkLogs().Name, - MountPath: volumeMounts.Path(kasContainerMain().Name, kasVolumeWorkLogs().Name), - }}, - }, + util.BuildContainer(kasContainerMain(), buildKASContainerMain(images.HyperKube, port, additionalNoProxyCIDRS, hcp)), + util.BuildContainer(konnectivityServerContainer(), buildKonnectivityServerContainer(images.KonnectivityServer, deploymentConfig.Replicas, cipherSuites)), }, Volumes: []corev1.Volume{ util.BuildVolume(kasVolumeBootstrapManifests(), buildKASVolumeBootstrapManifests), util.BuildVolume(kasVolumeLocalhostKubeconfig(), buildKASVolumeLocalhostKubeconfig), util.BuildVolume(kasVolumeWorkLogs(), buildKASVolumeWorkLogs), util.BuildVolume(kasVolumeConfig(), buildKASVolumeConfig), + util.BuildVolume(kasVolumeAuthConfig(), buildKASVolumeAuthConfig), util.BuildVolume(kasVolumeAuditConfig(), buildKASVolumeAuditConfig), util.BuildVolume(kasVolumeKonnectivityCA(), buildKASVolumeKonnectivityCA), util.BuildVolume(kasVolumeServerCert(), buildKASVolumeServerCert), @@ -242,6 +234,29 @@ func ReconcileKubeAPIServerDeployment(deployment *appsv1.Deployment, }, } + if auditConfig.Data[AuditPolicyProfileMapKey] != string(configv1.NoneAuditProfileType) { + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, corev1.Container{ + Name: "audit-logs", + Image: images.CLI, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/bash"}, + Args: []string{ + "-c", + RenderAuditLogScript(fmt.Sprintf("%s/%s", volumeMounts.Path(kasContainerMain().Name, kasVolumeWorkLogs().Name), "audit.log")), + }, + Resources: 
corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("5m"), + corev1.ResourceMemory: resource.MustParse("10Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: kasVolumeWorkLogs().Name, + MountPath: volumeMounts.Path(kasContainerMain().Name, kasVolumeWorkLogs().Name), + }}, + }) + } + // With managed etcd, we should wait for the known etcd client service name to // at least resolve before starting up to avoid futile connection attempts and // pod crashing. For unmanaged, make no assumptions. @@ -256,9 +271,6 @@ func ReconcileKubeAPIServerDeployment(deployment *appsv1.Deployment, applyNamedCertificateMounts(namedCertificates, &deployment.Spec.Template.Spec) applyCloudConfigVolumeMount(cloudProviderConfigRef, &deployment.Spec.Template.Spec, cloudProviderName) util.ApplyCloudProviderCreds(&deployment.Spec.Template.Spec, cloudProviderName, cloudProviderCreds, images.TokenMinterImage, kasContainerMain().Name) - if oidcCA != nil { - applyOIDCCAVolumeMount(oidcCA, &deployment.Spec.Template.Spec) - } if cloudProviderName == aws.Provider { deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, corev1.Container{ @@ -407,7 +419,7 @@ func kasContainerMain() *corev1.Container { } } -func buildKASContainerMain(image string, port int32, noProxyCIDRs []string) func(c *corev1.Container) { +func buildKASContainerMain(image string, port int32, noProxyCIDRs []string, hcp *hyperv1.HostedControlPlane) func(c *corev1.Container) { return func(c *corev1.Container) { c.Image = image c.TerminationMessagePolicy = corev1.TerminationMessageReadFile @@ -416,10 +428,19 @@ func buildKASContainerMain(image string, port int32, noProxyCIDRs []string) func c.Command = []string{ "hyperkube", } + + kasVerbosityLevel := 2 + if hcp.Annotations[hyperv1.KubeAPIServerVerbosityLevelAnnotation] != "" { + parsedKASVerbosityValue, err := strconv.Atoi(hcp.Annotations[hyperv1.KubeAPIServerVerbosityLevelAnnotation]) + 
if err == nil { + kasVerbosityLevel = parsedKASVerbosityValue + } + } + c.Args = []string{ "kube-apiserver", fmt.Sprintf("--openshift-config=%s", path.Join(volumeMounts.Path(c.Name, kasVolumeConfig().Name), KubeAPIServerConfigKey)), - "-v2", + fmt.Sprintf("--v=%d", kasVerbosityLevel), } c.Env = []corev1.EnvVar{{ @@ -438,6 +459,20 @@ func buildKASContainerMain(image string, port int32, noProxyCIDRs []string) func // https://github.com/kubernetes/kubernetes/blob/ab13c85316015cf9f115e29923ba9740bd1564fd/staging/src/k8s.io/apimachinery/pkg/util/net/http.go#L112-L114 proxy.SetEnvVars(&c.Env, noProxyCIDRs...) + if hcp.Annotations[hyperv1.KubeAPIServerGOGCAnnotation] != "" { + c.Env = append(c.Env, corev1.EnvVar{ + Name: "GOGC", + Value: hcp.Annotations[hyperv1.KubeAPIServerGOGCAnnotation], + }) + } + + if hcp.Annotations[hyperv1.KubeAPIServerGOMemoryLimitAnnotation] != "" { + c.Env = append(c.Env, corev1.EnvVar{ + Name: "GOMEMLIMIT", + Value: hcp.Annotations[hyperv1.KubeAPIServerGOMemoryLimitAnnotation], + }) + } + c.WorkingDir = volumeMounts.Path(c.Name, kasVolumeWorkLogs().Name) c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) c.Ports = []corev1.ContainerPort{ @@ -493,6 +528,18 @@ func buildKASVolumeConfig(v *corev1.Volume) { v.ConfigMap.DefaultMode = pointer.Int32(420) v.ConfigMap.Name = manifests.KASConfig("").Name } +func kasVolumeAuthConfig() *corev1.Volume { + return &corev1.Volume{ + Name: "auth-config", + } +} +func buildKASVolumeAuthConfig(v *corev1.Volume) { + if v.ConfigMap == nil { + v.ConfigMap = &corev1.ConfigMapVolumeSource{} + } + v.ConfigMap.DefaultMode = pointer.Int32(420) + v.ConfigMap.Name = manifests.AuthConfig("").Name +} func kasVolumeAuditConfig() *corev1.Volume { return &corev1.Volume{ Name: "audit-config", @@ -517,12 +564,6 @@ func buildKASVolumeKonnectivityCA(v *corev1.Volume) { v.ConfigMap.Name = manifests.KonnectivityCAConfigMap("").Name } -func kasVolumeOIDCCA() *corev1.Volume { - return &corev1.Volume{ - Name: "oidc-ca", - } -} - func 
kasVolumeServerCert() *corev1.Volume { return &corev1.Volume{ Name: "server-crt", @@ -704,31 +745,6 @@ func applyCloudConfigVolumeMount(configRef *corev1.LocalObjectReference, podSpec } } -func buildKASVolumeOIDCCA(configMapName string) func(v *corev1.Volume) { - return func(v *corev1.Volume) { - v.ConfigMap = &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{Name: configMapName}, - DefaultMode: pointer.Int32(0640), - } - } -} - -func applyOIDCCAVolumeMount(oidcCA *corev1.LocalObjectReference, podSpec *corev1.PodSpec) { - podSpec.Volumes = append(podSpec.Volumes, util.BuildVolume(kasVolumeOIDCCA(), buildKASVolumeOIDCCA(oidcCA.Name))) - var container *corev1.Container - for i, c := range podSpec.Containers { - if c.Name == kasContainerMain().Name { - container = &podSpec.Containers[i] - break - } - } - if container == nil { - panic("main kube apiserver container not found in spec") - } - container.VolumeMounts = append(container.VolumeMounts, - oidcCAVolumeMount.ContainerMounts(kasContainerMain().Name)...) 
-} - func invokeBootstrapRenderScript(workDir, payloadVersion, featureGateYaml string) string { var script = `#!/bin/sh @@ -740,11 +756,10 @@ cat </tmp/manifests/99_feature-gate.yaml %[3]s EOF -/usr/bin/cluster-config-operator render \ - --config-output-file config \ - --asset-input-dir /tmp/input \ +/usr/bin/render \ --asset-output-dir /tmp/output \ - --rendered-manifest-files=/tmp/manifests \ + --rendered-manifest-dir=/tmp/manifests \ + --cluster-profile=ibm-cloud-managed \ --payload-version=%[2]s cp /tmp/output/manifests/* %[1]s cp /tmp/manifests/* %[1]s @@ -876,7 +891,7 @@ func konnectivityServerContainer() *corev1.Container { } } -func buildKonnectivityServerContainer(image string, serverCount int) func(c *corev1.Container) { +func buildKonnectivityServerContainer(image string, serverCount int, cipherSuites []string) func(c *corev1.Container) { cpath := func(volume, file string) string { return path.Join(volumeMounts.Path(konnectivityServerContainer().Name, volume), file) } @@ -915,6 +930,11 @@ func buildKonnectivityServerContainer(image string, serverCount int) func(c *cor "--server-count", strconv.Itoa(serverCount), } + + if len(cipherSuites) != 0 { + c.Args = append(c.Args, fmt.Sprintf("--cipher-suites=%s", strings.Join(cipherSuites, ","))) + } + c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) c.Lifecycle = &corev1.Lifecycle{ PreStop: &corev1.LifecycleHandler{ @@ -955,3 +975,21 @@ func buildKonnectivityVolumeClusterCerts(v *corev1.Volume) { DefaultMode: pointer.Int32(0640), } } + +func RenderAuditLogScript(auditLogFilePath string) string { + var script = ` +set -o errexit +set -o nounset +set -o pipefail + +function cleanup() { + kill -- -$$ + wait +} +trap cleanup SIGTERM + +/usr/bin/tail -c+1 -F %s & +wait $! 
+` + return fmt.Sprintf(script, auditLogFilePath) +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/egresscfg.go b/control-plane-operator/controllers/hostedcontrolplane/kas/egresscfg.go index 5652206781..596c10d146 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/egresscfg.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/egresscfg.go @@ -5,6 +5,7 @@ import ( "fmt" "path" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" "github.com/openshift/hypershift/support/api" "github.com/openshift/hypershift/support/certs" hcpconfig "github.com/openshift/hypershift/support/config" @@ -41,7 +42,7 @@ func egressSelectorConfiguration() *kasv1beta1.EgressSelectorConfiguration { ProxyProtocol: kasv1beta1.ProtocolHTTPConnect, Transport: &kasv1beta1.Transport{ TCP: &kasv1beta1.TCPTransport{ - URL: fmt.Sprintf("https://127.0.0.1:%d", KonnectivityServerLocalPort), + URL: fmt.Sprintf("https://%s:%d", manifests.KonnectivityServerLocalService("").Name, KonnectivityServerLocalPort), TLSConfig: &kasv1beta1.TLSConfig{ CABundle: cpath(kasVolumeKonnectivityCA().Name, certs.CASignerCertMapKey), ClientCert: cpath(kasVolumeKonnectivityClientCert().Name, corev1.TLSCertKey), diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/kms/ibmcloud.go b/control-plane-operator/controllers/hostedcontrolplane/kas/kms/ibmcloud.go index f43e2eafa0..ac3a99e4ef 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/kms/ibmcloud.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/kms/ibmcloud.go @@ -49,7 +49,7 @@ type ibmCloudKMSProvider struct { } func NewIBMCloudKMSProvider(ibmCloud *hyperv1.IBMCloudKMSSpec, kmsImage string) (*ibmCloudKMSProvider, error) { - if ibmCloud == nil || len(ibmCloud.KeyList) == 0 || len(ibmCloud.Region) == 0 || len(kmsImage) == 0 { + if ibmCloud == nil || len(ibmCloud.KeyList) == 0 || len(ibmCloud.Region) == 0 { return nil, 
fmt.Errorf("ibmcloud kms metadata not specified") } return &ibmCloudKMSProvider{ @@ -65,7 +65,19 @@ func (p *ibmCloudKMSProvider) GenerateKMSEncryptionConfig() (*v1.EncryptionConfi keys = append(keys, k) } sort.Ints(keys) - var providerConfiguration []v1.ProviderConfiguration + + // KMS v2 should be first in the list + providerConfiguration := []v1.ProviderConfiguration{ + { + KMS: &v1.KMSConfiguration{ + APIVersion: "v2", + Name: fmt.Sprintf("%s%s", ibmKeyNamePrefix, "v2"), + Endpoint: ibmCloudKMSUnixSocket, + Timeout: &metav1.Duration{Duration: 35 * time.Second}, + }, + }, + } + // iterate in reverse because highest version key should be used for new secret encryption for i := len(keys) - 1; i >= 0; i-- { configEntry := v1.ProviderConfiguration{ @@ -311,5 +323,6 @@ func (p *ibmCloudKMSProvider) ApplyKMSConfig(podSpec *corev1.PodSpec) error { } container.VolumeMounts = append(container.VolumeMounts, ibmCloudKMSVolumeMounts.ContainerMounts(KasMainContainerName)...) + container.Args = append(container.Args, "--encryption-provider-config-automatic-reload=false") return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/kubeconfig.go b/control-plane-operator/controllers/hostedcontrolplane/kas/kubeconfig.go index cf06145af4..065e426d96 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/kubeconfig.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/kubeconfig.go @@ -3,6 +3,7 @@ package kas import ( "fmt" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" corev1 "k8s.io/api/core/v1" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" @@ -16,12 +17,12 @@ const ( KubeconfigKey = util.KubeconfigKey ) -func ReconcileServiceKubeconfigSecret(secret, cert *corev1.Secret, ca *corev1.ConfigMap, ownerRef config.OwnerRef) error { - svcURL := InClusterKASURL() +func ReconcileServiceKubeconfigSecret(secret, cert *corev1.Secret, ca *corev1.ConfigMap, ownerRef 
config.OwnerRef, platformType hyperv1.PlatformType) error { + svcURL := InClusterKASURL(platformType) return pki.ReconcileKubeConfig(secret, cert, ca, svcURL, "", "service", ownerRef) } -func ReconcileServiceCAPIKubeconfigSecret(secret, cert *corev1.Secret, ca *corev1.ConfigMap, ownerRef config.OwnerRef, capiClusterName string) error { +func ReconcileServiceCAPIKubeconfigSecret(secret, cert *corev1.Secret, ca *corev1.ConfigMap, ownerRef config.OwnerRef, capiClusterName string, platformType hyperv1.PlatformType) error { // The client used by CAPI machine controller expects the kubeconfig to have this key // https://github.com/kubernetes-sigs/cluster-api/blob/5c85a0a01ee44ecf7c8a3c3fdc867a88af87d73c/util/secret/secret.go#L29-L33 // and to be labeled with cluster.x-k8s.io/cluster-name= so the secret can be cached by the client. @@ -31,15 +32,18 @@ func ReconcileServiceCAPIKubeconfigSecret(secret, cert *corev1.Secret, ca *corev } secret.Labels[capiv1.ClusterNameLabel] = capiClusterName - return pki.ReconcileKubeConfig(secret, cert, ca, InClusterKASURL(), "value", "capi", ownerRef) + return pki.ReconcileKubeConfig(secret, cert, ca, InClusterKASURL(platformType), "value", "capi", ownerRef) } -func InClusterKASURL() string { +func InClusterKASURL(platformType hyperv1.PlatformType) string { + if platformType == hyperv1.IBMCloudPlatform { + return fmt.Sprintf("https://%s:%d", manifests.KubeAPIServerServiceName, config.KASSVCIBMCloudPort) + } return fmt.Sprintf("https://%s:%d", manifests.KubeAPIServerServiceName, config.KASSVCPort) } -func InClusterKASReadyURL() string { - return InClusterKASURL() + "/readyz" +func InClusterKASReadyURL(platformType hyperv1.PlatformType) string { + return InClusterKASURL(platformType) + "/readyz" } func ReconcileLocalhostKubeconfigSecret(secret, cert *corev1.Secret, ca *corev1.ConfigMap, ownerRef config.OwnerRef, apiServerPort int32) error { diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/oauth.go 
b/control-plane-operator/controllers/hostedcontrolplane/kas/oauth.go index 55e663793e..5352320a94 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/oauth.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/oauth.go @@ -12,11 +12,15 @@ import ( "github.com/openshift/hypershift/support/config" ) -func ReconcileOauthMetadata(cfg *corev1.ConfigMap, ownerRef config.OwnerRef, externalOAuthAddress string, externalOAuthPort int32) error { +func ReconcileOauthMetadata(cfg *corev1.ConfigMap, ownerRef config.OwnerRef, userOauthMetadata string, externalOAuthAddress string, externalOAuthPort int32) error { ownerRef.ApplyTo(cfg) if cfg.Data == nil { cfg.Data = map[string]string{} } + if userOauthMetadata != "" { + cfg.Data[OauthMetadataConfigKey] = userOauthMetadata + return nil + } oauthURL := fmt.Sprintf("https://%s:%d", externalOAuthAddress, externalOAuthPort) cfg.Data[OauthMetadataConfigKey] = fmt.Sprintf(oauthMetadata, oauthURL) return nil diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/params.go b/control-plane-operator/controllers/hostedcontrolplane/kas/params.go index 482f52381c..bb830571cd 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/params.go @@ -69,6 +69,9 @@ type KubeAPIServerParams struct { Availability hyperv1.AvailabilityPolicy APIServerSTSDirectives string + + MaxMutatingRequestsInflight string + MaxRequestsInflight string } type KubeAPIServerServiceParams struct { @@ -80,6 +83,9 @@ const ( KonnectivityHealthPort = 2041 KonnectivityServerLocalPort = 8090 KonnectivityServerPort = 8091 + + defaultMaxRequestsInflight = 3000 + defaultMaxMutatingRequestsInflight = 1000 ) func NewKubeAPIServerParams(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, externalAPIAddress string, externalAPIPort int32, externalOAuthAddress string, externalOAuthPort int32, 
setDefaultSecurityContext bool) *KubeAPIServerParams { @@ -101,12 +107,14 @@ func NewKubeAPIServerParams(ctx context.Context, hcp *hyperv1.HostedControlPlane Images: KubeAPIServerImages{ HyperKube: releaseImageProvider.GetImage("hyperkube"), CLI: releaseImageProvider.GetImage("cli"), - ClusterConfigOperator: releaseImageProvider.GetImage("cluster-config-operator"), + ClusterConfigOperator: releaseImageProvider.GetImage("cluster-config-api"), TokenMinterImage: releaseImageProvider.GetImage("token-minter"), AWSKMS: releaseImageProvider.GetImage("aws-kms-provider"), AWSPodIdentityWebhookImage: releaseImageProvider.GetImage("aws-pod-identity-webhook"), KonnectivityServer: releaseImageProvider.GetImage("apiserver-network-proxy"), }, + MaxRequestsInflight: fmt.Sprint(defaultMaxRequestsInflight), + MaxMutatingRequestsInflight: fmt.Sprint(defaultMaxMutatingRequestsInflight), } if hcp.Spec.Configuration != nil { params.APIServer = hcp.Spec.Configuration.APIServer @@ -116,6 +124,12 @@ func NewKubeAPIServerParams(ctx context.Context, hcp *hyperv1.HostedControlPlane params.Image = hcp.Spec.Configuration.Image params.Scheduler = hcp.Spec.Configuration.Scheduler } + if reqInflight := hcp.Annotations[hyperv1.KubeAPIServerMaximumRequestsInFlight]; reqInflight != "" { + params.MaxRequestsInflight = reqInflight + } + if mutatingReqInflight := hcp.Annotations[hyperv1.KubeAPIServerMaximumMutatingRequestsInFlight]; mutatingReqInflight != "" { + params.MaxMutatingRequestsInflight = mutatingReqInflight + } params.AdvertiseAddress = util.GetAdvertiseAddress(hcp, config.DefaultAdvertiseIPv4Address, config.DefaultAdvertiseIPv6Address) @@ -328,7 +342,7 @@ func (p *KubeAPIServerParams) ExternalURL() string { // InternalURL is used by ReconcileBootstrapKubeconfigSecret. 
func (p *KubeAPIServerParams) InternalURL() string { - return fmt.Sprintf("https://%s:%d", pki.AddBracketsIfIPv6(p.InternalAddress), 443) + return fmt.Sprintf("https://%s:%d", pki.AddBracketsIfIPv6(p.InternalAddress), p.ExternalPort) } func (p *KubeAPIServerParams) ExternalKubeconfigKey() string { @@ -378,6 +392,8 @@ func (p *KubeAPIServerParams) ConfigParams() KubeAPIServerConfigParams { DisableProfiling: p.DisableProfiling, APIServerSTSDirectives: p.APIServerSTSDirectives, Authentication: p.Authentication, + MaxRequestsInflight: p.MaxRequestsInflight, + MaxMutatingRequestsInflight: p.MaxMutatingRequestsInflight, } } @@ -404,6 +420,8 @@ type KubeAPIServerConfigParams struct { DisableProfiling bool APIServerSTSDirectives string Authentication *configv1.AuthenticationSpec + MaxRequestsInflight string + MaxMutatingRequestsInflight string } func (p *KubeAPIServerParams) TLSSecurityProfile() *configv1.TLSSecurityProfile { @@ -461,6 +479,13 @@ func (p *KubeAPIServerParams) FeatureGates() []string { } } +func (p *KubeAPIServerParams) CipherSuites() []string { + if p.APIServer != nil { + return config.CipherSuites(p.APIServer.TLSSecurityProfile) + } + return config.CipherSuites(nil) +} + func (p *KubeAPIServerParams) ServiceNodePortRange() string { if p.Network != nil && len(p.Network.ServiceNodePortRange) > 0 { return p.Network.ServiceNodePortRange diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/pdb.go b/control-plane-operator/controllers/hostedcontrolplane/kas/pdb.go index 1ac3829b7d..11fa45bd68 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/pdb.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/pdb.go @@ -1,10 +1,9 @@ package kas import ( - hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/support/util" policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) func ReconcilePodDisruptionBudget(pdb 
*policyv1.PodDisruptionBudget, p *KubeAPIServerParams) error { @@ -15,15 +14,6 @@ func ReconcilePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget, p *KubeAPIS } p.OwnerRef.ApplyTo(pdb) - - var minAvailable int - switch p.Availability { - case hyperv1.SingleReplica: - minAvailable = 0 - case hyperv1.HighlyAvailable: - minAvailable = 1 - } - pdb.Spec.MinAvailable = &intstr.IntOrString{Type: intstr.Int, IntVal: int32(minAvailable)} - + util.ReconcilePodDisruptionBudget(pdb, p.Availability) return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/kas/service.go b/control-plane-operator/controllers/hostedcontrolplane/kas/service.go index e99bebbb2b..36f4ab0a94 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kas/service.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kas/service.go @@ -169,6 +169,9 @@ func ReconcilePrivateService(svc *corev1.Service, hcp *hyperv1.HostedControlPlan } portSpec.Port = int32(config.KASSVCPort) + if hcp.Spec.Platform.Type == hyperv1.IBMCloudPlatform { + portSpec.Port = int32(config.KASSVCIBMCloudPort) + } portSpec.Protocol = corev1.ProtocolTCP portSpec.TargetPort = intstr.FromString("client") svc.Spec.Type = corev1.ServiceTypeLoadBalancer @@ -278,9 +281,9 @@ func ReconcileKonnectivityServerService(svc *corev1.Service, ownerRef config.Own return nil } -func ReconcileKonnectivityExternalRoute(route *routev1.Route, ownerRef config.OwnerRef, hostname string, defaultIngressDomain string) error { +func ReconcileKonnectivityExternalRoute(route *routev1.Route, ownerRef config.OwnerRef, hostname string, defaultIngressDomain string, labelHCPRoutes bool) error { ownerRef.ApplyTo(route) - if err := util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, manifests.KonnectivityServerService(route.Namespace).Name); err != nil { + if err := util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, manifests.KonnectivityServerService(route.Namespace).Name, labelHCPRoutes); err != 
nil { return err } if route.Annotations == nil { diff --git a/control-plane-operator/controllers/hostedcontrolplane/kcm/config.go b/control-plane-operator/controllers/hostedcontrolplane/kcm/config.go index 613b10de84..eb904bd819 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kcm/config.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kcm/config.go @@ -3,14 +3,17 @@ package kcm import ( "encoding/json" "fmt" + "html/template" "path" + "strings" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kcpv1 "github.com/openshift/api/kubecontrolplane/v1" - "github.com/openshift/hypershift/support/certs" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/imageprovider" + "github.com/openshift/hypershift/support/config" ) @@ -59,29 +62,20 @@ func generateConfig(serviceServingCA *corev1.ConfigMap) (string, error) { return string(b), nil } -func ReconcileKCMServiceServingCA(cm, combinedCA *corev1.ConfigMap, ownerRef config.OwnerRef) error { - ownerRef.ApplyTo(cm) - if cm.Data == nil { - cm.Data = map[string]string{} - } - if _, hasKey := cm.Data[ServiceServingCAKey]; !hasKey { - cm.Data[ServiceServingCAKey] = combinedCA.Data[certs.CASignerCertMapKey] - } - return nil -} - -func ReconcileServiceAccount(sa *corev1.ServiceAccount) error { - // nothing to reconcile - return nil -} +func ReconcileRecyclerConfig(config *corev1.ConfigMap, ownerRef config.OwnerRef, releaseImageProvider *imageprovider.ReleaseImageProvider) error { + var result strings.Builder -func ReconcileRecyclerConfig(config *corev1.ConfigMap, ownerRef config.OwnerRef) error { ownerRef.ApplyTo(config) if config.Data == nil { config.Data = map[string]string{} } + + data := map[string]string{ + "rhtoolsImageName": releaseImageProvider.GetImage("tools"), + } + // https://github.com/openshift/cluster-kube-controller-manager-operator/blob/64b4c1ba/bindata/assets/kube-controller-manager/recycler-cm.yaml - 
config.Data[RecyclerPodTemplateKey] = `apiVersion: v1 + templateContent := `apiVersion: v1 kind: Pod metadata: name: recycler-pod @@ -94,7 +88,7 @@ spec: serviceAccountName: pv-recycler-controller containers: - name: recycler-container - image: quay.io/openshift/origin-tools:latest + image: {{.rhtoolsImageName}} command: - "/bin/bash" args: @@ -113,5 +107,18 @@ spec: volumes: - name: vol ` + + tmpl, err := template.New("recycler-pod").Parse(templateContent) + if err != nil { + return fmt.Errorf("failed to parse recycler pod template: %w", err) + } + + err = tmpl.Execute(&result, data) + if err != nil { + return fmt.Errorf("failed to render the recycler pod template: %w", err) + } + + config.Data[RecyclerPodTemplateKey] = result.String() + return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/kcm/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/kcm/deployment.go index ff487ce557..673f4edc00 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/kcm/deployment.go +++ b/control-plane-operator/controllers/hostedcontrolplane/kcm/deployment.go @@ -64,7 +64,7 @@ func kcmLabels() map[string]string { } } -func ReconcileDeployment(deployment *appsv1.Deployment, config, rootCA, serviceServingCA *corev1.ConfigMap, p *KubeControllerManagerParams) error { +func ReconcileDeployment(deployment *appsv1.Deployment, config, rootCA, serviceServingCA *corev1.ConfigMap, p *KubeControllerManagerParams, platformType hyperv1.PlatformType) error { // preserve existing resource requirements for main KCM container mainContainer := util.FindContainer(kcmContainerMain().Name, deployment.Spec.Template.Spec.Containers) if mainContainer != nil { @@ -99,7 +99,7 @@ func ReconcileDeployment(deployment *appsv1.Deployment, config, rootCA, serviceS deployment.Spec.Template.ObjectMeta.Annotations = map[string]string{} } deployment.Spec.Template.ObjectMeta.Annotations[configHashAnnotation] = util.ComputeHash(configBytes) - 
deployment.Spec.Template.ObjectMeta.Annotations[rootCAHashAnnotation] = util.HashStruct(rootCA.Data) + deployment.Spec.Template.ObjectMeta.Annotations[rootCAHashAnnotation] = util.HashSimple(rootCA.Data) deployment.Spec.Template.Spec = corev1.PodSpec{ AutomountServiceAccountToken: pointer.Bool(false), @@ -120,7 +120,7 @@ func ReconcileDeployment(deployment *appsv1.Deployment, config, rootCA, serviceS } p.DeploymentConfig.ApplyTo(deployment) if serviceServingCA != nil { - deployment.Spec.Template.ObjectMeta.Annotations[serviceCAHashAnnotation] = util.HashStruct(serviceServingCA.Data) + deployment.Spec.Template.ObjectMeta.Annotations[serviceCAHashAnnotation] = util.HashSimple(serviceServingCA.Data) applyServingCAVolume(&deployment.Spec.Template.Spec, serviceServingCA) } else { deployment.Spec.Template.ObjectMeta.Annotations[serviceCAHashAnnotation] = "" @@ -128,7 +128,7 @@ func ReconcileDeployment(deployment *appsv1.Deployment, config, rootCA, serviceS applyCloudConfigVolumeMount(&deployment.Spec.Template.Spec, p.CloudProviderConfig, p.CloudProvider) util.ApplyCloudProviderCreds(&deployment.Spec.Template.Spec, p.CloudProvider, p.CloudProviderCreds, p.TokenMinterImage, kcmContainerMain().Name) - util.AvailabilityProber(kas.InClusterKASReadyURL(), p.AvailabilityProberImage, &deployment.Spec.Template.Spec) + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), p.AvailabilityProberImage, &deployment.Spec.Template.Spec) return nil } @@ -361,6 +361,7 @@ func kcmArgs(p *KubeControllerManagerParams) []string { "--cluster-signing-duration=17520h", fmt.Sprintf("--tls-cert-file=%s", cpath(kcmVolumeServerCert().Name, corev1.TLSCertKey)), fmt.Sprintf("--tls-private-key-file=%s", cpath(kcmVolumeServerCert().Name, corev1.TLSPrivateKeyKey)), + "--node-monitor-grace-period=50s", }...) 
for _, f := range p.FeatureGates() { args = append(args, fmt.Sprintf("--feature-gates=%s", f)) diff --git a/control-plane-operator/controllers/hostedcontrolplane/konnectivity/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/konnectivity/reconcile.go index 690dee6d54..2594d4a9b7 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/konnectivity/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/konnectivity/reconcile.go @@ -19,6 +19,10 @@ import ( "github.com/openshift/hypershift/support/util" ) +const ( + konnectivityAgentName = "konnectivity-agent" +) + var ( volumeMounts = util.PodVolumeMounts{ konnectivityAgentContainer().Name: util.ContainerVolumeMounts{ @@ -30,8 +34,8 @@ var ( func konnectivityAgentLabels() map[string]string { return map[string]string{ - "app": "konnectivity-agent", - hyperv1.ControlPlaneComponent: "konnectivity-agent", + "app": konnectivityAgentName, + hyperv1.ControlPlaneComponent: konnectivityAgentName, } } @@ -52,7 +56,7 @@ func buildKonnectivitySignerCAkonnectivitySignerCAVolume(v *corev1.Volume) { func konnectivityAgentContainer() *corev1.Container { return &corev1.Container{ - Name: "konnectivity-agent", + Name: konnectivityAgentName, } } @@ -71,6 +75,13 @@ func buildKonnectivityVolumeAgentCerts(v *corev1.Volume) { func ReconcileAgentDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, deploymentConfig config.DeploymentConfig, image string, ips []string) error { ownerRef.ApplyTo(deployment) + + // preserve existing resource requirements for main scheduler container + mainContainer := util.FindContainer(konnectivityAgentName, deployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + deploymentConfig.SetContainerResourcesIfPresent(mainContainer) + } + deployment.Spec = appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: konnectivityAgentLabels(), @@ -132,11 +143,11 @@ func buildKonnectivityAgentContainer(image string, ips []string) func(c 
*corev1. "--keepalive-time", "30s", "--probe-interval", - "30s", + "5s", "--sync-interval", - "1m", + "5s", "--sync-interval-cap", - "5m", + "30s", "--v", "3", } diff --git a/control-plane-operator/controllers/hostedcontrolplane/machineapprover/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/machineapprover/reconcile.go index 69f78de38e..895bd2ff56 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/machineapprover/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/machineapprover/reconcile.go @@ -21,6 +21,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +const ( + machineApproverName = "machine-approver-controller" +) + func ReconcileMachineApproverConfig(cm *corev1.ConfigMap, owner config.OwnerRef) error { owner.ApplyTo(cm) type NodeClientCert struct { @@ -81,6 +85,20 @@ func ReconcileMachineApproverRoleBinding(binding *rbacv1.RoleBinding, role *rbac func ReconcileMachineApproverDeployment(deployment *appsv1.Deployment, hcp *hyperv1.HostedControlPlane, sa *corev1.ServiceAccount, kubeconfigSecretName string, cm *corev1.ConfigMap, machineApproverImage, availabilityProberImage string, setDefaultSecurityContext bool, ownerRef config.OwnerRef) error { ownerRef.ApplyTo(deployment) + machineApproverResources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("50Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), + }, + } + // preserve existing resource requirements + mainContainer := util.FindContainer(machineApproverName, deployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + machineApproverResources = mainContainer.Resources + } + } + // TODO: enable leader election when the flag is added in machine-approver args := []string{ "--config=/var/run/configmaps/config/config.yaml", @@ -143,7 +161,7 @@ func 
ReconcileMachineApproverDeployment(deployment *appsv1.Deployment, hcp *hype }, Containers: []corev1.Container{ { - Name: "machine-approver-controller", + Name: machineApproverName, Image: machineApproverImage, ImagePullPolicy: corev1.PullIfNotPresent, VolumeMounts: []corev1.VolumeMount{ @@ -156,21 +174,16 @@ func ReconcileMachineApproverDeployment(deployment *appsv1.Deployment, hcp *hype MountPath: "/var/run/configmaps/config", }, }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("50Mi"), - corev1.ResourceCPU: resource.MustParse("10m"), - }, - }, - Command: []string{"/usr/bin/machine-approver"}, - Args: args, + Resources: machineApproverResources, + Command: []string{"/usr/bin/machine-approver"}, + Args: args, }, }, }, }, } - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec) + util.AvailabilityProber(kas.InClusterKASReadyURL(hcp.Spec.Platform.Type), availabilityProberImage, &deployment.Spec.Template.Spec) deploymentConfig := config.DeploymentConfig{ Scheduling: config.Scheduling{ diff --git a/control-plane-operator/controllers/hostedcontrolplane/manifests/cco.go b/control-plane-operator/controllers/hostedcontrolplane/manifests/cco.go new file mode 100644 index 0000000000..a210c43ac6 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/manifests/cco.go @@ -0,0 +1,27 @@ +package manifests + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Cloud Credential Operator + +func CloudCredentialOperatorKubeconfig(ns string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cloud-credential-operator-kubeconfig", + Namespace: ns, + }, + } +} + +func CloudCredentialOperatorDeployment(ns string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cloud-credential-operator", + 
Namespace: ns, + }, + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/manifests/cno.go b/control-plane-operator/controllers/hostedcontrolplane/manifests/cno.go index 920cf7971d..ad61f36b48 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/manifests/cno.go +++ b/control-plane-operator/controllers/hostedcontrolplane/manifests/cno.go @@ -74,3 +74,21 @@ func OVNKubeSBDBRoute(namespace string) *routev1.Route { }, } } + +func MasterExternalService(namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "ovnkube-master-external", + }, + } +} + +func MasterInternalService(namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "ovnkube-master-internal", + }, + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/manifests/etcd.go b/control-plane-operator/controllers/hostedcontrolplane/manifests/etcd.go index ebf57b7064..5fe7aa54b9 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/manifests/etcd.go +++ b/control-plane-operator/controllers/hostedcontrolplane/manifests/etcd.go @@ -5,9 +5,14 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" + rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +const ( + EtcdDefragName = "etcd-defrag-controller" +) + func EtcdStatefulSet(ns string) *appsv1.StatefulSet { return &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ @@ -52,3 +57,31 @@ func EtcdPodDisruptionBudget(ns string) *policyv1.PodDisruptionBudget { }, } } + +func EtcdDefragControllerRole(ns string) *rbacv1.Role { + return &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: EtcdDefragName, + Namespace: ns, + }, + } +} + +func EtcdDefragControllerRoleBinding(ns string) *rbacv1.RoleBinding { + return &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: EtcdDefragName, + 
Namespace: ns, + }, + } + +} + +func EtcdDefragControllerServiceAccount(ns string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: EtcdDefragName, + Namespace: ns, + }, + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/manifests/ingress.go b/control-plane-operator/controllers/hostedcontrolplane/manifests/ingress.go index c7320d1847..d976156d8f 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/manifests/ingress.go +++ b/control-plane-operator/controllers/hostedcontrolplane/manifests/ingress.go @@ -3,6 +3,7 @@ package manifests import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,15 +30,6 @@ func IngressDefaultIngressController() *operatorv1.IngressController { } } -func IngressPrivateIngressController(name string) *operatorv1.IngressController { - return &operatorv1.IngressController{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: "openshift-ingress-operator", - }, - } -} - func RouterServiceAccount(ns string) *corev1.ServiceAccount { return &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ @@ -162,3 +154,12 @@ func MetricsForwarderRoute(ns string) *routev1.Route { }, } } + +func RouterPodDisruptionBudget(ns string) *policyv1.PodDisruptionBudget { + return &policyv1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: "router", + Namespace: ns, + }, + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/manifests/kas.go b/control-plane-operator/controllers/hostedcontrolplane/manifests/kas.go index 72b9289614..7e354186a1 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/manifests/kas.go +++ b/control-plane-operator/controllers/hostedcontrolplane/manifests/kas.go @@ -130,6 +130,15 @@ func KASConfig(controlPlaneNamespace string) *corev1.ConfigMap { } } +func AuthConfig(controlPlaneNamespace 
string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "auth-config", + Namespace: controlPlaneNamespace, + }, + } +} + func KASOAuthMetadata(controlPlaneNamespace string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ diff --git a/control-plane-operator/controllers/hostedcontrolplane/manifests/oauth.go b/control-plane-operator/controllers/hostedcontrolplane/manifests/oauth.go index a02ed42acf..ad9002a9b8 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/manifests/oauth.go +++ b/control-plane-operator/controllers/hostedcontrolplane/manifests/oauth.go @@ -17,6 +17,15 @@ func OAuthServerConfig(ns string) *corev1.ConfigMap { } } +func OAuthAuditConfig(ns string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "oauth-openshift-audit", + Namespace: ns, + }, + } +} + func OAuthServerPodDisruptionBudget(ns string) *policyv1.PodDisruptionBudget { return &policyv1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ diff --git a/control-plane-operator/controllers/hostedcontrolplane/manifests/openshift_apiserver.go b/control-plane-operator/controllers/hostedcontrolplane/manifests/openshift_apiserver.go index 61a9e85b7d..202a8c106f 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/manifests/openshift_apiserver.go +++ b/control-plane-operator/controllers/hostedcontrolplane/manifests/openshift_apiserver.go @@ -4,6 +4,7 @@ import ( prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,6 +43,13 @@ func OpenShiftAPIServerDeployment(ns string) *appsv1.Deployment { Name: "openshift-apiserver", Namespace: ns, }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + TerminationGracePeriodSeconds: 
ptr.To[int64](120), + }, + }, + }, } } diff --git a/control-plane-operator/controllers/hostedcontrolplane/mcs/params.go b/control-plane-operator/controllers/hostedcontrolplane/mcs/params.go index 7c5639a462..e44314218c 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/mcs/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/mcs/params.go @@ -9,19 +9,22 @@ import ( hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/support/config" "github.com/openshift/hypershift/support/globalconfig" + "github.com/openshift/hypershift/support/util" ) type MCSParams struct { - OwnerRef config.OwnerRef - RootCA *corev1.Secret - KubeletClientCA *corev1.ConfigMap - UserCA *corev1.ConfigMap - PullSecret *corev1.Secret - DNS *configv1.DNS - Infrastructure *configv1.Infrastructure - Network *configv1.Network - Proxy *configv1.Proxy - InstallConfig *globalconfig.InstallConfig + OwnerRef config.OwnerRef + RootCA *corev1.Secret + KubeletClientCA *corev1.ConfigMap + UserCA *corev1.ConfigMap + PullSecret *corev1.Secret + DNS *configv1.DNS + Infrastructure *configv1.Infrastructure + Network *configv1.Network + Proxy *configv1.Proxy + Image *configv1.Image + InstallConfig *globalconfig.InstallConfig + ConfigurationHash string } func NewMCSParams(hcp *hyperv1.HostedControlPlane, rootCA, pullSecret *corev1.Secret, userCA, kubeletClientCA *corev1.ConfigMap) (*MCSParams, error) { @@ -40,16 +43,26 @@ func NewMCSParams(hcp *hyperv1.HostedControlPlane, rootCA, pullSecret *corev1.Se proxy := globalconfig.ProxyConfig() globalconfig.ReconcileProxyConfigWithStatus(proxy, hcp) + image := globalconfig.ImageConfig() + globalconfig.ReconcileImageConfig(image, hcp) + + hcConfigurationHash, err := util.HashStruct(hcp.Spec.Configuration) + if err != nil { + return &MCSParams{}, fmt.Errorf("failed to hash HCP configuration: %w", err) + } + return &MCSParams{ - OwnerRef: config.OwnerRefFrom(hcp), - RootCA: rootCA, - KubeletClientCA: 
kubeletClientCA, - UserCA: userCA, - PullSecret: pullSecret, - DNS: dns, - Infrastructure: infra, - Network: network, - Proxy: proxy, - InstallConfig: globalconfig.NewInstallConfig(hcp), + OwnerRef: config.OwnerRefFrom(hcp), + RootCA: rootCA, + KubeletClientCA: kubeletClientCA, + UserCA: userCA, + PullSecret: pullSecret, + DNS: dns, + Infrastructure: infra, + Network: network, + Proxy: proxy, + Image: image, + InstallConfig: globalconfig.NewInstallConfig(hcp), + ConfigurationHash: hcConfigurationHash, }, nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/mcs/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/mcs/reconcile.go index 8770f554ea..391588c8c5 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/mcs/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/mcs/reconcile.go @@ -31,6 +31,10 @@ func ReconcileMachineConfigServerConfig(cm *corev1.ConfigMap, p *MCSParams) erro if err != nil { return err } + serializedImage, err := serialize(p.Image) + if err != nil { + return err + } serializedMasterConfigPool, err := serializeConfigPool(masterConfigPool()) if err != nil { return err @@ -54,9 +58,11 @@ func ReconcileMachineConfigServerConfig(cm *corev1.ConfigMap, p *MCSParams) erro cm.Data["cluster-infrastructure-02-config.yaml"] = serializedInfra cm.Data["cluster-network-02-config.yaml"] = serializedNetwork cm.Data["cluster-proxy-01-config.yaml"] = serializedProxy + cm.Data["image-config.yaml"] = serializedImage cm.Data["install-config.yaml"] = p.InstallConfig.String() cm.Data["master.machineconfigpool.yaml"] = serializedMasterConfigPool cm.Data["worker.machineconfigpool.yaml"] = serializedWorkerConfigPool + cm.Data["configuration-hash"] = p.ConfigurationHash return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/nto/clusternodetuningoperator.go b/control-plane-operator/controllers/hostedcontrolplane/nto/clusternodetuningoperator.go index 2cbb2690ff..c8bcceebf4 100644 
--- a/control-plane-operator/controllers/hostedcontrolplane/nto/clusternodetuningoperator.go +++ b/control-plane-operator/controllers/hostedcontrolplane/nto/clusternodetuningoperator.go @@ -176,6 +176,21 @@ func ReconcileServiceAccount(sa *corev1.ServiceAccount, ownerRef config.OwnerRef func ReconcileDeployment(dep *appsv1.Deployment, params Params) error { params.OwnerRef.ApplyTo(dep) + + ntoResources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("50Mi"), + corev1.ResourceCPU: resource.MustParse("10m"), + }, + } + // preserve existing resource requirements + mainContainer := util.FindContainer(operatorName, dep.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + ntoResources = mainContainer.Resources + } + } + dep.Spec.Selector = &metav1.LabelSelector{ MatchLabels: map[string]string{ "name": operatorName, @@ -229,11 +244,8 @@ func ReconcileDeployment(dep *appsv1.Deployment, params Params) error { Ports: []corev1.ContainerPort{ {Name: "metrics", ContainerPort: 60000}, }, - ImagePullPolicy: corev1.PullIfNotPresent, - Resources: corev1.ResourceRequirements{Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("10m"), - corev1.ResourceMemory: resource.MustParse("50Mi"), - }}, + ImagePullPolicy: corev1.PullIfNotPresent, + Resources: ntoResources, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, VolumeMounts: []corev1.VolumeMount{ {Name: "node-tuning-operator-tls", MountPath: "/etc/secrets"}, diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/auditcfg.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/auditcfg.go index 78a5a8dc5f..bcac729b9a 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oapi/auditcfg.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/auditcfg.go @@ -11,7 +11,8 @@ import ( ) const 
( - auditPolicyConfigMapKey = "policy.yaml" + auditPolicyConfigMapKey = "policy.yaml" + auditPolicyProfileMapKey = "profile" ) func ReconcileAuditConfig(cm *corev1.ConfigMap, ownerRef config.OwnerRef, auditConfig configv1.Audit) error { @@ -28,5 +29,6 @@ func ReconcileAuditConfig(cm *corev1.ConfigMap, ownerRef config.OwnerRef, auditC return err } cm.Data[auditPolicyConfigMapKey] = string(policyBytes) + cm.Data[auditPolicyProfileMapKey] = string(auditConfig.Profile) return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/config.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/config.go index b4cd0fc96d..160a5f1ce7 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oapi/config.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/config.go @@ -26,7 +26,7 @@ const ( defaultInternalRegistryHostname = "image-registry.openshift-image-registry.svc:5000" ) -func ReconcileConfig(cm *corev1.ConfigMap, auditWebhookRef *corev1.LocalObjectReference, ownerRef config.OwnerRef, etcdURL, ingressDomain, minTLSVersion string, cipherSuites []string, imageConfig *configv1.Image, projectConfig *configv1.Project) error { +func ReconcileConfig(cm *corev1.ConfigMap, auditWebhookRef *corev1.LocalObjectReference, ownerRef config.OwnerRef, etcdURL, ingressDomain, minTLSVersion string, cipherSuites []string, imageConfig *configv1.ImageSpec, projectConfig *configv1.Project) error { ownerRef.ApplyTo(cm) if cm.Data == nil { cm.Data = map[string]string{} @@ -46,7 +46,7 @@ func ReconcileConfig(cm *corev1.ConfigMap, auditWebhookRef *corev1.LocalObjectRe return nil } -func reconcileConfigObject(cfg *openshiftcpv1.OpenShiftAPIServerConfig, auditWebhookRef *corev1.LocalObjectReference, etcdURL, ingressDomain, minTLSVersion string, cipherSuites []string, imageConfig *configv1.Image, projectConfig *configv1.Project) { +func reconcileConfigObject(cfg *openshiftcpv1.OpenShiftAPIServerConfig, auditWebhookRef 
*corev1.LocalObjectReference, etcdURL, ingressDomain, minTLSVersion string, cipherSuites []string, imageConfig *configv1.ImageSpec, projectConfig *configv1.Project) { cfg.TypeMeta = metav1.TypeMeta{ Kind: "OpenShiftAPIServerConfig", APIVersion: openshiftcpv1.GroupVersion.String(), @@ -58,8 +58,8 @@ func reconcileConfigObject(cfg *openshiftcpv1.OpenShiftAPIServerConfig, auditWeb cfg.APIServerArguments = map[string][]string{ "shutdown-delay-duration": {"15s"}, "audit-log-format": {"json"}, - "audit-log-maxsize": {"100"}, - "audit-log-maxbackup": {"10"}, + "audit-log-maxsize": {"10"}, + "audit-log-maxbackup": {"1"}, "audit-policy-file": {cpath(oasVolumeAuditConfig().Name, auditPolicyConfigMapKey)}, "audit-log-path": {cpath(oasVolumeWorkLogs().Name, "audit.log")}, } @@ -83,19 +83,18 @@ func reconcileConfigObject(cfg *openshiftcpv1.OpenShiftAPIServerConfig, auditWeb } // Image policy config - cfg.ImagePolicyConfig.InternalRegistryHostname = imageConfig.Status.InternalRegistryHostname - cfg.ImagePolicyConfig.ExternalRegistryHostnames = imageConfig.Status.ExternalRegistryHostnames - if cfg.ImagePolicyConfig.InternalRegistryHostname == "" { - cfg.ImagePolicyConfig.InternalRegistryHostname = defaultInternalRegistryHostname - } - var allowedRegistries openshiftcpv1.AllowedRegistries - for _, location := range imageConfig.Spec.AllowedRegistriesForImport { - allowedRegistries = append(allowedRegistries, openshiftcpv1.RegistryLocation{ - DomainName: location.DomainName, - Insecure: location.Insecure, - }) + cfg.ImagePolicyConfig.InternalRegistryHostname = defaultInternalRegistryHostname + if imageConfig != nil { + cfg.ImagePolicyConfig.ExternalRegistryHostnames = imageConfig.ExternalRegistryHostnames + var allowedRegistries openshiftcpv1.AllowedRegistries + for _, location := range imageConfig.AllowedRegistriesForImport { + allowedRegistries = append(allowedRegistries, openshiftcpv1.RegistryLocation{ + DomainName: location.DomainName, + Insecure: location.Insecure, + }) + } + 
cfg.ImagePolicyConfig.AllowedRegistriesForImport = allowedRegistries } - cfg.ImagePolicyConfig.AllowedRegistriesForImport = allowedRegistries // Routing config cfg.RoutingConfig.Subdomain = ingressDomain diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment.go index c8a82d4e89..7029deb2ca 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment.go @@ -6,13 +6,16 @@ import ( "path" "strings" + configv1 "github.com/openshift/api/config/v1" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" @@ -30,6 +33,10 @@ const ( defaultOAPIPort int32 = 8443 serviceCAHashAnnotation = "kube-controller-manager.hypershift.openshift.io/service-ca-hash" + + konnectivityHTTPSProxyPort = 8090 + + certsTrustPath = "/etc/pki/tls/certs" ) var ( @@ -50,7 +57,7 @@ var ( oasTrustAnchorVolume().Name: "/etc/pki/ca-trust/extracted/pem", pullSecretVolume().Name: "/var/lib/kubelet", }, - oasSocks5ProxyContainer().Name: { + oasKonnectivityProxyContainer().Name: { oasVolumeKubeconfig().Name: "/etc/kubernetes/secrets/kubeconfig", oasVolumeKonnectivityProxyCert().Name: "/etc/konnectivity/proxy-client", oasVolumeKonnectivityProxyCA().Name: "/etc/konnectivity/proxy-ca", @@ -76,7 +83,24 @@ func openShiftAPIServerLabels() map[string]string { } } -func ReconcileDeployment(deployment *appsv1.Deployment, auditWebhookRef *corev1.LocalObjectReference, 
ownerRef config.OwnerRef, config *corev1.ConfigMap, auditConfig *corev1.ConfigMap, serviceServingCA *corev1.ConfigMap, deploymentConfig config.DeploymentConfig, image string, socks5ProxyImage string, etcdURL string, availabilityProberImage string, internalOAuthDisable bool) error { +func ReconcileDeployment(deployment *appsv1.Deployment, + auditWebhookRef *corev1.LocalObjectReference, + ownerRef config.OwnerRef, + config *corev1.ConfigMap, + auditConfig *corev1.ConfigMap, + serviceServingCA *corev1.ConfigMap, + deploymentConfig config.DeploymentConfig, + image string, + konnectivityHTTPSProxyImage string, + etcdURL string, + availabilityProberImage string, + internalOAuthDisable bool, + platformType hyperv1.PlatformType, + hcpAdditionalTrustBundle *corev1.LocalObjectReference, + imageRegistryAdditionalTrustedCAs *corev1.ConfigMap, + clusterConf *hyperv1.ClusterConfiguration, + proxyConfig *configv1.ProxySpec, + noProxy string) error { ownerRef.ApplyTo(deployment) // preserve existing resource requirements for main OAS container @@ -128,28 +152,7 @@ func ReconcileDeployment(deployment *appsv1.Deployment, auditWebhookRef *corev1. 
InitContainers: []corev1.Container{util.BuildContainer(oasTrustAnchorGenerator(), buildOASTrustAnchorGenerator(image))}, Containers: []corev1.Container{ util.BuildContainer(oasContainerMain(), buildOASContainerMain(image, strings.Split(etcdUrlData.Host, ":")[0], defaultOAPIPort, internalOAuthDisable)), - { - Name: "audit-logs", - Image: image, - ImagePullPolicy: corev1.PullIfNotPresent, - Command: []string{ - "/usr/bin/tail", - "-c+1", - "-F", - fmt.Sprintf("%s/%s", volumeMounts.Path(oasContainerMain().Name, oasVolumeWorkLogs().Name), "audit.log"), - }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("5m"), - corev1.ResourceMemory: resource.MustParse("10Mi"), - }, - }, - VolumeMounts: []corev1.VolumeMount{{ - Name: oasVolumeWorkLogs().Name, - MountPath: volumeMounts.Path(oasContainerMain().Name, oasVolumeWorkLogs().Name), - }}, - }, - util.BuildContainer(oasSocks5ProxyContainer(), buildOASSocks5ProxyContainer(socks5ProxyImage)), + util.BuildContainer(oasKonnectivityProxyContainer(), buildOASKonnectivityProxyContainer(konnectivityHTTPSProxyImage, proxyConfig, noProxy)), }, Volumes: []corev1.Volume{ util.BuildVolume(oasVolumeWorkLogs(), buildOASVolumeWorkLogs), @@ -174,20 +177,127 @@ func ReconcileDeployment(deployment *appsv1.Deployment, auditWebhookRef *corev1. 
}, } + if auditConfig.Data[auditPolicyProfileMapKey] != string(configv1.NoneAuditProfileType) { + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, corev1.Container{ + Name: "audit-logs", + Image: image, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/bash"}, + Args: []string{ + "-c", + kas.RenderAuditLogScript(fmt.Sprintf("%s/%s", volumeMounts.Path(oasContainerMain().Name, oasVolumeWorkLogs().Name), "audit.log")), + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("5m"), + corev1.ResourceMemory: resource.MustParse("10Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: oasVolumeWorkLogs().Name, + MountPath: volumeMounts.Path(oasContainerMain().Name, oasVolumeWorkLogs().Name), + }}, + }) + } + if serviceServingCA != nil { deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, util.BuildVolume(serviceCASignerVolume(), buildServiceCASignerVolume)) trustAnchorGeneratorContainer := util.FindContainer(oasTrustAnchorGenerator().Name, deployment.Spec.Template.Spec.InitContainers) trustAnchorGeneratorContainer.VolumeMounts = append(trustAnchorGeneratorContainer.VolumeMounts, serviceSignerCertMount.ContainerMounts(oasTrustAnchorGenerator().Name)...) 
- deployment.Spec.Template.ObjectMeta.Annotations[serviceCAHashAnnotation] = util.HashStruct(serviceServingCA.Data) + deployment.Spec.Template.ObjectMeta.Annotations[serviceCAHashAnnotation] = util.HashSimple(serviceServingCA.Data) } else { deployment.Spec.Template.ObjectMeta.Annotations[serviceCAHashAnnotation] = "" } + var additionalCAs []corev1.VolumeProjection + + // if hostedCluster additionalTrustBundle is set, add it to the volumeProjection + if hcpAdditionalTrustBundle != nil { + additionalCAs = append(additionalCAs, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: *hcpAdditionalTrustBundle, + Items: []corev1.KeyToPath{{Key: "ca-bundle.crt", Path: "additional-ca-bundle.pem"}}, + }, + }) + } + + // If additional trusted CAs exist for image registries, add them to the volumeProjection + // The configmap for image registry additional trusted CA can have a separate key per registry. + // Each entry in the configmap will get its own key to path mapping so that we mount it separately. 
+ if imageRegistryAdditionalTrustedCAs != nil && len(imageRegistryAdditionalTrustedCAs.Data) > 0 { + vol := corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{Name: clusterConf.Image.AdditionalTrustedCA.Name}, + }, + } + // use a set to get a sorted key list for consistency across reconciles + keys := sets.New[string]() + for key := range imageRegistryAdditionalTrustedCAs.Data { + keys.Insert(key) + } + for i, key := range sets.List(keys) { + vol.ConfigMap.Items = append(vol.ConfigMap.Items, corev1.KeyToPath{ + Key: key, + Path: fmt.Sprintf("image-registry-%d.pem", i+1), + }) + } + additionalCAs = append(additionalCAs, vol) + } + + if len(additionalCAs) > 0 { + projVol := util.BuildProjectedVolume(additionalTrustBundleProjectedVolume(), additionalCAs, buildAdditionalTrustBundleProjectedVolume) + deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, projVol) + mainContainer := util.FindContainer(oasContainerMain().Name, deployment.Spec.Template.Spec.Containers) + for _, additionalCA := range additionalCAs { + for _, item := range additionalCA.ConfigMap.Items { + mainContainer.VolumeMounts = append(mainContainer.VolumeMounts, corev1.VolumeMount{ + Name: additionalTrustBundleProjectedVolume().Name, + MountPath: path.Join(certsTrustPath, item.Path), + SubPath: item.Path, + }) + } + } + } if auditWebhookRef != nil { applyOASAuditWebhookConfigFileVolume(&deployment.Spec.Template.Spec, auditWebhookRef) } - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec) + var proxyAdditionalCAs []corev1.VolumeProjection + if hcpAdditionalTrustBundle != nil { + proxyAdditionalCAs = append(proxyAdditionalCAs, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: *hcpAdditionalTrustBundle, + Items: []corev1.KeyToPath{{Key: "ca-bundle.crt", Path: "additional-ca-bundle.pem"}}, + }, + }) + } + + if 
clusterConf != nil && clusterConf.Proxy != nil && len(clusterConf.Proxy.TrustedCA.Name) > 0 { + proxyAdditionalCAs = append(proxyAdditionalCAs, corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: clusterConf.Proxy.TrustedCA.Name, + }, + Items: []corev1.KeyToPath{{Key: "ca-bundle.crt", Path: "proxy-trusted-ca.pem"}}, + }, + }) + } + if len(proxyAdditionalCAs) > 0 { + projVol := util.BuildProjectedVolume(proxyAdditionalTrustBundleProjectedVolume(), proxyAdditionalCAs, buildAdditionalTrustBundleProjectedVolume) + deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, projVol) + proxyContainer := util.FindContainer(oasKonnectivityProxyContainer().Name, deployment.Spec.Template.Spec.Containers) + for _, additionalCA := range proxyAdditionalCAs { + for _, item := range additionalCA.ConfigMap.Items { + proxyContainer.VolumeMounts = append(proxyContainer.VolumeMounts, corev1.VolumeMount{ + Name: proxyAdditionalTrustBundleProjectedVolume().Name, + MountPath: path.Join(certsTrustPath, item.Path), + SubPath: item.Path, + }) + } + } + } + + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), availabilityProberImage, &deployment.Spec.Template.Spec) deploymentConfig.ApplyTo(deployment) @@ -206,34 +316,51 @@ func oasContainerMain() *corev1.Container { } } -func oasSocks5ProxyContainer() *corev1.Container { +func oasKonnectivityProxyContainer() *corev1.Container { return &corev1.Container{ - Name: "socks5-proxy", + Name: "konnectivity-proxy", } } +const buildTrustAnchorScript = ` +#!/bin/bash + +set -euo pipefail + +cp -f -r /etc/pki/ca-trust/extracted/pem/* /run/ca-trust-generated/ + +if ! 
[[ -f /run/service-ca-signer/service-ca.crt ]]; then + exit 0 +fi + +chmod 0666 /run/ca-trust-generated/tls-ca-bundle.pem +echo '#service signer ca' >> /run/ca-trust-generated/tls-ca-bundle.pem +cat /run/service-ca-signer/service-ca.crt >>/run/ca-trust-generated/tls-ca-bundle.pem +chmod 0444 /run/ca-trust-generated/tls-ca-bundle.pem +` + func buildOASTrustAnchorGenerator(oasImage string) func(*corev1.Container) { return func(c *corev1.Container) { c.Image = oasImage c.Command = []string{ "/bin/bash", "-c", - "cp -f /etc/pki/ca-trust/extracted/pem/* /run/ca-trust-generated/ && " + - "if ! [[ -f /run/service-ca-signer/service-ca.crt ]]; then exit 0; fi && " + - "chmod 0666 /run/ca-trust-generated/tls-ca-bundle.pem && " + - "echo '#service signer ca' >> /run/ca-trust-generated/tls-ca-bundle.pem && " + - "cat /run/service-ca-signer/service-ca.crt >>/run/ca-trust-generated/tls-ca-bundle.pem && " + - "chmod 0444 /run/ca-trust-generated/tls-ca-bundle.pem", + buildTrustAnchorScript, } c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) } } -func buildOASSocks5ProxyContainer(socks5ProxyImage string) func(c *corev1.Container) { +func buildOASKonnectivityProxyContainer(konnectivityHTTPSProxyImage string, proxyConfig *configv1.ProxySpec, noProxy string) func(c *corev1.Container) { return func(c *corev1.Container) { - c.Image = socks5ProxyImage - c.Command = []string{"/usr/bin/control-plane-operator", "konnectivity-socks5-proxy"} + c.Image = konnectivityHTTPSProxyImage + c.Command = []string{"/usr/bin/control-plane-operator", "konnectivity-https-proxy"} c.Args = []string{"run"} + if proxyConfig != nil { + c.Args = append(c.Args, "--http-proxy", proxyConfig.HTTPProxy) + c.Args = append(c.Args, "--https-proxy", proxyConfig.HTTPSProxy) + c.Args = append(c.Args, "--no-proxy", noProxy) + } c.Resources.Requests = corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("10m"), corev1.ResourceMemory: resource.MustParse("10Mi"), @@ -267,20 +394,18 @@ func 
buildOASContainerMain(image string, etcdHostname string, port int32, intern if internalOAuthDisable { c.Args = append(c.Args, "--internal-oauth-disabled=true") } - // this list can be gathered from firewall docs: https://docs.openshift.com/container-platform/4.12/installing/install_config/configuring-firewall.html - defaultSampleImportContainerRegistries := "quay.io,cdn03.quay.io,cdn02.quay.io,cdn01.quay.io,cdn.quay.io,registry.redhat.io,registry.access.redhat.com,access.redhat.com,sso.redhat.com" c.Env = []corev1.EnvVar{ { Name: "HTTP_PROXY", - Value: fmt.Sprintf("socks5://127.0.0.1:%d", kas.KonnectivityServerLocalPort), + Value: fmt.Sprintf("http://127.0.0.1:%d", konnectivityHTTPSProxyPort), }, { Name: "HTTPS_PROXY", - Value: fmt.Sprintf("socks5://127.0.0.1:%d", kas.KonnectivityServerLocalPort), + Value: fmt.Sprintf("http://127.0.0.1:%d", konnectivityHTTPSProxyPort), }, { Name: "NO_PROXY", - Value: fmt.Sprintf("%s,%s,%s", manifests.KubeAPIServerService("").Name, etcdHostname, defaultSampleImportContainerRegistries), + Value: fmt.Sprintf("%s,%s", manifests.KubeAPIServerService("").Name, etcdHostname), }, } c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) @@ -365,7 +490,7 @@ func oasVolumeKubeconfig() *corev1.Volume { func buildOASVolumeKubeconfig(v *corev1.Volume) { v.Secret = &corev1.SecretVolumeSource{} v.Secret.SecretName = manifests.KASServiceKubeconfigSecret("").Name - v.Secret.DefaultMode = pointer.Int32(0640) + v.Secret.DefaultMode = ptr.To[int32](0640) } func oasVolumeEtcdClientCA() *corev1.Volume { @@ -388,7 +513,7 @@ func oasVolumeServingCert() *corev1.Volume { func buildOASVolumeServingCert(v *corev1.Volume) { v.Secret = &corev1.SecretVolumeSource{} v.Secret.SecretName = manifests.OpenShiftAPIServerCertSecret("").Name - v.Secret.DefaultMode = pointer.Int32(0640) + v.Secret.DefaultMode = ptr.To[int32](0640) } func oasVolumeEtcdClientCert() *corev1.Volume { @@ -400,7 +525,7 @@ func oasVolumeEtcdClientCert() *corev1.Volume { func 
buildOASVolumeEtcdClientCert(v *corev1.Volume) { v.Secret = &corev1.SecretVolumeSource{} v.Secret.SecretName = manifests.EtcdClientSecret("").Name - v.Secret.DefaultMode = pointer.Int32(0640) + v.Secret.DefaultMode = ptr.To[int32](0640) } func oasVolumeKonnectivityProxyCert() *corev1.Volume { @@ -432,6 +557,25 @@ func buildServiceCASignerVolume(v *corev1.Volume) { v.ConfigMap.Name = manifests.ServiceServingCA("").Name } +func additionalTrustBundleProjectedVolume() *corev1.Volume { + return &corev1.Volume{ + Name: "additional-trust-bundle", + } +} + +func proxyAdditionalTrustBundleProjectedVolume() *corev1.Volume { + return &corev1.Volume{ + Name: "proxy-additional-trust-bundle", + } +} + +func buildAdditionalTrustBundleProjectedVolume(v *corev1.Volume, additionalCAs []corev1.VolumeProjection) { + v.Projected = &corev1.ProjectedVolumeSource{ + Sources: additionalCAs, + DefaultMode: ptr.To[int32](420), + } +} + func pullSecretVolume() *corev1.Volume { return &corev1.Volume{ Name: "pull-secret", @@ -441,7 +585,7 @@ func pullSecretVolume() *corev1.Volume { func buildOASVolumeKonnectivityProxyCert(v *corev1.Volume) { v.Secret = &corev1.SecretVolumeSource{} v.Secret.SecretName = manifests.KonnectivityClientSecret("").Name - v.Secret.DefaultMode = pointer.Int32(0640) + v.Secret.DefaultMode = ptr.To[int32](0640) } func buildOASVolumeKonnectivityProxyCA(v *corev1.Volume) { diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment_test.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment_test.go new file mode 100644 index 0000000000..7189b3e4a6 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/deployment_test.go @@ -0,0 +1,159 @@ +package oapi + +import ( + "testing" + + . 
"github.com/onsi/gomega" + configv1 "github.com/openshift/api/config/v1" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +// Ensure certain deployment fields do not get set +func TestReconcileOpenshiftAPIServerDeploymentTrustBundle(t *testing.T) { + var ( + imageName = "oapiImage" + targetNamespace = "test" + oapiDeployment = manifests.OpenShiftAPIServerDeployment(targetNamespace) + hcp = &hyperv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hcp", + Namespace: targetNamespace, + }, + } + testOapiCM = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-oapi-config", + Namespace: targetNamespace, + }, + Data: map[string]string{"config.yaml": "test-data"}, + } + ) + hcp.Name = "name" + hcp.Namespace = "namespace" + ownerRef := config.OwnerRefFrom(hcp) + testCases := []struct { + name string + cm corev1.ConfigMap + expectedVolume *corev1.Volume + auditConfig *corev1.ConfigMap + expectedVolumeProjection []corev1.VolumeProjection + deploymentConfig config.DeploymentConfig + additionalTrustBundle *corev1.LocalObjectReference + clusterConf *hyperv1.ClusterConfiguration + imageRegistryAdditionalCAs *corev1.ConfigMap + expectProjectedVolumeMounted bool + }{ + { + name: "Trust bundle provided", + auditConfig: manifests.OpenShiftAPIServerAuditConfig(targetNamespace), + deploymentConfig: config.DeploymentConfig{}, + additionalTrustBundle: &corev1.LocalObjectReference{ + Name: "user-ca-bundle", + }, + expectedVolume: &corev1.Volume{ + Name: "additional-trust-bundle", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{getFakeVolumeProjectionCABundle()}, + 
DefaultMode: ptr.To[int32](420), + }, + }, + }, + expectProjectedVolumeMounted: true, + }, + { + name: "Trust bundle not provided", + auditConfig: manifests.OpenShiftAPIServerAuditConfig(targetNamespace), + deploymentConfig: config.DeploymentConfig{}, + expectedVolume: nil, + additionalTrustBundle: nil, + expectProjectedVolumeMounted: false, + }, + { + name: "Trust bundle and image registry additional CAs provided", + auditConfig: manifests.OpenShiftAPIServerAuditConfig(targetNamespace), + deploymentConfig: config.DeploymentConfig{}, + additionalTrustBundle: &corev1.LocalObjectReference{ + Name: "user-ca-bundle", + }, + imageRegistryAdditionalCAs: &corev1.ConfigMap{ + Data: map[string]string{ + "registry1": "fake-bundle", + "registry2": "fake-bundle-2", + }, + }, + clusterConf: &hyperv1.ClusterConfiguration{ + Image: &configv1.ImageSpec{ + AdditionalTrustedCA: configv1.ConfigMapNameReference{ + Name: "image-registry-additional-ca", + }, + }, + }, + expectedVolume: &corev1.Volume{ + Name: "additional-trust-bundle", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{getFakeVolumeProjectionCABundle(), getFakeVolumeProjectionImageRegistryCAs()}, + DefaultMode: ptr.To[int32](420), + }, + }, + }, + expectProjectedVolumeMounted: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewGomegaWithT(t) + tc.auditConfig.Data = map[string]string{"policy.yaml": "test-data"} + err := ReconcileDeployment(oapiDeployment, nil, ownerRef, testOapiCM, tc.auditConfig, nil, tc.deploymentConfig, imageName, "konnectivityProxyImage", config.DefaultEtcdURL, util.AvailabilityProberImageName, false, hyperv1.AgentPlatform, tc.additionalTrustBundle, tc.imageRegistryAdditionalCAs, tc.clusterConf, nil, "") + g.Expect(err).To(BeNil()) + if tc.expectProjectedVolumeMounted { + g.Expect(oapiDeployment.Spec.Template.Spec.Volumes).To(ContainElement(*tc.expectedVolume)) + } else { + 
g.Expect(oapiDeployment.Spec.Template.Spec.Volumes).NotTo(ContainElement(&corev1.Volume{Name: "additional-trust-bundle"})) + } + }) + } +} + +func getFakeVolumeProjectionCABundle() corev1.VolumeProjection { + return corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "user-ca-bundle", + }, + Items: []corev1.KeyToPath{ + { + Key: "ca-bundle.crt", + Path: "additional-ca-bundle.pem", + }, + }, + }, + } +} + +func getFakeVolumeProjectionImageRegistryCAs() corev1.VolumeProjection { + return corev1.VolumeProjection{ + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "image-registry-additional-ca", + }, + Items: []corev1.KeyToPath{ + { + Key: "registry1", + Path: "image-registry-1.pem", + }, + { + Key: "registry2", + Path: "image-registry-2.pem", + }, + }, + }, + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/oauth_deployment.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/oauth_deployment.go index 93700254de..2ac00c50cd 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oapi/oauth_deployment.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/oauth_deployment.go @@ -4,6 +4,7 @@ import ( "fmt" "path" + configv1 "github.com/openshift/api/config/v1" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -54,7 +55,7 @@ func openShiftOAuthAPIServerLabels() map[string]string { } } -func ReconcileOAuthAPIServerDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, auditConfig *corev1.ConfigMap, p *OAuthDeploymentParams) error { +func ReconcileOAuthAPIServerDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, auditConfig *corev1.ConfigMap, p *OAuthDeploymentParams, platformType hyperv1.PlatformType) error { ownerRef.ApplyTo(deployment) // preserve existing resource requirements for 
main oauth apiserver container @@ -91,30 +92,10 @@ func ReconcileOAuthAPIServerDeployment(deployment *appsv1.Deployment, ownerRef c deployment.Spec.Template.Annotations[oapiAuditConfigHashAnnotation] = auditConfigHash deployment.Spec.Template.Spec = corev1.PodSpec{ - AutomountServiceAccountToken: pointer.Bool(false), + AutomountServiceAccountToken: pointer.Bool(false), + TerminationGracePeriodSeconds: pointer.Int64(120), Containers: []corev1.Container{ util.BuildContainer(oauthContainerMain(), buildOAuthContainerMain(p)), - { - Name: "audit-logs", - Image: p.Image, - ImagePullPolicy: corev1.PullIfNotPresent, - Command: []string{ - "/usr/bin/tail", - "-c+1", - "-F", - fmt.Sprintf("%s/%s", oauthVolumeMounts.Path(oauthContainerMain().Name, oauthVolumeWorkLogs().Name), "audit.log"), - }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("5m"), - corev1.ResourceMemory: resource.MustParse("10Mi"), - }, - }, - VolumeMounts: []corev1.VolumeMount{{ - Name: oauthVolumeWorkLogs().Name, - MountPath: oauthVolumeMounts.Path(oauthContainerMain().Name, oauthVolumeWorkLogs().Name), - }}, - }, }, Volumes: []corev1.Volume{ util.BuildVolume(oauthVolumeWorkLogs(), buildOAuthVolumeWorkLogs), @@ -128,11 +109,34 @@ func ReconcileOAuthAPIServerDeployment(deployment *appsv1.Deployment, ownerRef c }, } + if auditConfig.Data[auditPolicyProfileMapKey] != string(configv1.NoneAuditProfileType) { + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, corev1.Container{ + Name: "audit-logs", + Image: p.Image, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/bash"}, + Args: []string{ + "-c", + kas.RenderAuditLogScript(fmt.Sprintf("%s/%s", oauthVolumeMounts.Path(oauthContainerMain().Name, oauthVolumeWorkLogs().Name), "audit.log")), + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("5m"), + 
corev1.ResourceMemory: resource.MustParse("10Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: oauthVolumeWorkLogs().Name, + MountPath: oauthVolumeMounts.Path(oauthContainerMain().Name, oauthVolumeWorkLogs().Name), + }}, + }) + } + if p.AuditWebhookRef != nil { applyOauthAuditWebhookConfigFileVolume(&deployment.Spec.Template.Spec, p.AuditWebhookRef) } - util.AvailabilityProber(kas.InClusterKASReadyURL(), p.AvailabilityProberImage, &deployment.Spec.Template.Spec) + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), p.AvailabilityProberImage, &deployment.Spec.Template.Spec) p.DeploymentConfig.ApplyTo(deployment) return nil } @@ -159,8 +163,8 @@ func buildOAuthContainerMain(p *OAuthDeploymentParams) func(c *corev1.Container) fmt.Sprintf("--api-audiences=%s", p.ServiceAccountIssuerURL), fmt.Sprintf("--audit-log-path=%s", cpath(oauthVolumeWorkLogs().Name, "audit.log")), "--audit-log-format=json", - "--audit-log-maxsize=100", - "--audit-log-maxbackup=10", + "--audit-log-maxsize=10", + "--audit-log-maxbackup=1", fmt.Sprintf("--etcd-cafile=%s", cpath(oauthVolumeEtcdClientCA().Name, certs.CASignerCertMapKey)), fmt.Sprintf("--etcd-keyfile=%s", cpath(oauthVolumeEtcdClientCert().Name, pki.EtcdClientKeyKey)), fmt.Sprintf("--etcd-certfile=%s", cpath(oauthVolumeEtcdClientCert().Name, pki.EtcdClientCrtKey)), @@ -305,17 +309,7 @@ func ReconcileOpenShiftOAuthAPIServerPodDisruptionBudget(pdb *policyv1.PodDisrup MatchLabels: openShiftOAuthAPIServerLabels(), } } - p.OwnerRef.ApplyTo(pdb) - - var minAvailable int - switch p.Availability { - case hyperv1.SingleReplica: - minAvailable = 0 - case hyperv1.HighlyAvailable: - minAvailable = 1 - } - pdb.Spec.MinAvailable = &intstr.IntOrString{Type: intstr.Int, IntVal: int32(minAvailable)} - + util.ReconcilePodDisruptionBudget(pdb, p.Availability) return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/params.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/params.go index 
4fa6c8ff17..180463a106 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oapi/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/params.go @@ -15,24 +15,25 @@ import ( ) type OpenShiftAPIServerParams struct { - APIServer *configv1.APIServerSpec `json:"apiServer"` + APIServer *configv1.APIServerSpec + Proxy *configv1.ProxySpec IngressSubDomain string - EtcdURL string `json:"etcdURL"` - ServiceAccountIssuerURL string `json:"serviceAccountIssuerURL"` - - OpenShiftAPIServerDeploymentConfig config.DeploymentConfig `json:"openshiftAPIServerDeploymentConfig,inline"` - OpenShiftOAuthAPIServerDeploymentConfig config.DeploymentConfig `json:"openshiftOAuthAPIServerDeploymentConfig,inline"` - config.OwnerRef `json:",inline"` - OpenShiftAPIServerImage string `json:"openshiftAPIServerImage"` - OAuthAPIServerImage string `json:"oauthAPIServerImage"` - ProxyImage string `json:"haproxyImage"` - AvailabilityProberImage string `json:"availabilityProberImage"` - Availability hyperv1.AvailabilityPolicy - Ingress *configv1.IngressSpec - Image *configv1.Image - Project *configv1.Project - AuditWebhookRef *corev1.LocalObjectReference - InternalOAuthDisable bool + EtcdURL string + ServiceAccountIssuerURL string + + OpenShiftAPIServerDeploymentConfig config.DeploymentConfig + OpenShiftOAuthAPIServerDeploymentConfig config.DeploymentConfig + config.OwnerRef + OpenShiftAPIServerImage string + OAuthAPIServerImage string + ProxyImage string + AvailabilityProberImage string + Availability hyperv1.AvailabilityPolicy + Ingress *configv1.IngressSpec + Image *configv1.ImageSpec + Project *configv1.Project + AuditWebhookRef *corev1.LocalObjectReference + InternalOAuthDisable bool } type OAuthDeploymentParams struct { @@ -53,12 +54,11 @@ func NewOpenShiftAPIServerParams(hcp *hyperv1.HostedControlPlane, observedConfig params := &OpenShiftAPIServerParams{ OpenShiftAPIServerImage: releaseImageProvider.GetImage("openshift-apiserver"), OAuthAPIServerImage: 
releaseImageProvider.GetImage("oauth-apiserver"), - ProxyImage: releaseImageProvider.GetImage("socks5-proxy"), + ProxyImage: releaseImageProvider.GetImage(util.CPOImageName), ServiceAccountIssuerURL: hcp.Spec.IssuerURL, IngressSubDomain: globalconfig.IngressDomain(hcp), AvailabilityProberImage: releaseImageProvider.GetImage(util.AvailabilityProberImageName), Availability: hcp.Spec.ControllerAvailabilityPolicy, - Image: observedConfig.Image, Project: observedConfig.Project, InternalOAuthDisable: !util.HCPOAuthEnabled(hcp), } @@ -66,6 +66,8 @@ func NewOpenShiftAPIServerParams(hcp *hyperv1.HostedControlPlane, observedConfig if hcp.Spec.Configuration != nil { params.Ingress = hcp.Spec.Configuration.Ingress params.APIServer = hcp.Spec.Configuration.APIServer + params.Image = hcp.Spec.Configuration.Image + params.Proxy = hcp.Spec.Configuration.Proxy } if hcp.Spec.AuditWebhook != nil && len(hcp.Spec.AuditWebhook.Name) > 0 { diff --git a/control-plane-operator/controllers/hostedcontrolplane/oapi/pdb.go b/control-plane-operator/controllers/hostedcontrolplane/oapi/pdb.go index e8fc2ec33c..441215587a 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oapi/pdb.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oapi/pdb.go @@ -1,10 +1,9 @@ package oapi import ( - hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/support/util" policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) func ReconcilePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget, p *OpenShiftAPIServerParams) error { @@ -13,17 +12,7 @@ func ReconcilePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget, p *OpenShif MatchLabels: openShiftAPIServerLabels(), } } - p.OwnerRef.ApplyTo(pdb) - - var minAvailable int - switch p.Availability { - case hyperv1.SingleReplica: - minAvailable = 0 - case hyperv1.HighlyAvailable: - minAvailable = 1 - } - pdb.Spec.MinAvailable = 
&intstr.IntOrString{Type: intstr.Int, IntVal: int32(minAvailable)} - + util.ReconcilePodDisruptionBudget(pdb, p.Availability) return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/assets.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/assets.go new file mode 100644 index 0000000000..897b965d9a --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/assets.go @@ -0,0 +1,35 @@ +package oauth + +import ( + "bytes" + "embed" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/yaml" +) + +//go:embed files/* +var f embed.FS + +var ( + oauthPolicy = mustConfigmapData("audit-policy.yaml", "audit.yaml") +) + +func mustAsset(file string) []byte { + data, err := f.ReadFile("files/" + file) + if err != nil { + panic(err) + } + + return data +} + +func mustConfigmapData(file string, key string) string { + fileBytes := mustAsset(file) + cm := &corev1.ConfigMap{} + if err := yaml.NewYAMLOrJSONDecoder(bytes.NewReader(fileBytes), 500).Decode(&cm); err != nil { + panic(err) + } + + return cm.Data[key] +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/auditcfg.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/auditcfg.go new file mode 100644 index 0000000000..293804083f --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/auditcfg.go @@ -0,0 +1,24 @@ +package oauth + +import ( + configv1 "github.com/openshift/api/config/v1" + corev1 "k8s.io/api/core/v1" + + "github.com/openshift/hypershift/support/config" +) + +const ( + auditPolicyConfigMapKey = "policy.yaml" + auditPolicyProfileMapKey = "profile" +) + +func ReconcileAuditConfig(cm *corev1.ConfigMap, ownerRef config.OwnerRef, auditConfig configv1.Audit) error { + ownerRef.ApplyTo(cm) + if cm.Data == nil { + cm.Data = map[string]string{} + } + + cm.Data[auditPolicyConfigMapKey] = oauthPolicy + cm.Data[auditPolicyProfileMapKey] = string(auditConfig.Profile) + return nil +} diff --git 
a/control-plane-operator/controllers/hostedcontrolplane/oauth/config.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/config.go index 53d617126e..e0ce4da5a1 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oauth/config.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/config.go @@ -51,11 +51,9 @@ func ReconcileOAuthServerConfig(ctx context.Context, cm *corev1.ConfigMap, owner } func generateOAuthConfig(ctx context.Context, client crclient.Client, namespace string, params *OAuthConfigParams) (*osinv1.OsinServerConfig, error) { - var identityProviders []osinv1.IdentityProvider - identityProviders, _, err := convertIdentityProviders(ctx, params.IdentityProviders, params.OauthConfigOverrides, client, namespace) - if err != nil { - return nil, err - } + // Ignore the error here since we don't want to fail the deployment if the identity providers are invalid + // A condition will be set on the HC to indicate the error + identityProviders, _, _ := ConvertIdentityProviders(ctx, params.IdentityProviders, params.OauthConfigOverrides, client, namespace) cpath := func(volume, file string) string { dir := volumeMounts.Path(oauthContainerMain().Name, volume) diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment.go index c630c7df27..b4ba999a60 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment.go @@ -20,13 +20,16 @@ import ( "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" "github.com/openshift/hypershift/support/config" "github.com/openshift/hypershift/support/util" - utilpointer "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( configHashAnnotation = "oauth.hypershift.openshift.io/config-hash" oauthNamedCertificateMountPathPrefix = 
"/etc/kubernetes/certs/named" socks5ProxyContainerName = "socks-proxy" + + httpKonnectivityProxyPort = 8092 + socks5KonnectivityProxyPort = 8090 ) var ( @@ -41,6 +44,22 @@ var ( oauthVolumeProvidersTemplate().Name: "/etc/kubernetes/secrets/templates/providers", oauthVolumeWorkLogs().Name: "/var/run/kubernetes", oauthVolumeMasterCABundle().Name: "/etc/kubernetes/certs/master-ca", + oauthVolumeAuditConfig().Name: "/etc/kubernetes/audit-config", + }, + oauthContainerSocks5Proxy().Name: { + oauthVolumeKubeconfig().Name: "/etc/kubernetes", + oauthVolumeKonnectivityProxyClientCert().Name: "/etc/konnectivity/proxy-client", + oauthVolumeKonnectivityProxyTrustBundle().Name: "/etc/konnectivity/proxy-ca", + }, + oauthContainerHTTPProxy().Name: { + oauthVolumeKubeconfig().Name: "/etc/kubernetes", + oauthVolumeKonnectivityProxyClientCert().Name: "/etc/konnectivity/proxy-client", + oauthVolumeKonnectivityProxyTrustBundle().Name: "/etc/konnectivity/proxy-ca", + }, + } + oauthAuditWebhookConfigFileVolumeMount = util.PodVolumeMounts{ + oauthContainerMain().Name: { + oauthAuditWebhookConfigFileVolume().Name: "/etc/kubernetes/auditwebhook", }, } ) @@ -52,7 +71,7 @@ func oauthLabels() map[string]string { } } -func ReconcileDeployment(ctx context.Context, client client.Client, deployment *appsv1.Deployment, ownerRef config.OwnerRef, config *corev1.ConfigMap, image string, deploymentConfig config.DeploymentConfig, identityProviders []configv1.IdentityProvider, providerOverrides map[string]*ConfigOverride, availabilityProberImage string, namedCertificates []configv1.APIServerNamedServingCert, socks5ProxyImage string, noProxy []string, params *OAuthConfigParams) error { +func ReconcileDeployment(ctx context.Context, client client.Client, deployment *appsv1.Deployment, auditWebhookRef *corev1.LocalObjectReference, ownerRef config.OwnerRef, config *corev1.ConfigMap, auditConfig *corev1.ConfigMap, image string, deploymentConfig config.DeploymentConfig, identityProviders 
[]configv1.IdentityProvider, providerOverrides map[string]*ConfigOverride, availabilityProberImage string, namedCertificates []configv1.APIServerNamedServingCert, proxyImage string, proxyConfig *configv1.ProxySpec, clusterNoProxy string, oauthNoProxy []string, params *OAuthConfigParams, platformType hyperv1.PlatformType) error { ownerRef.ApplyTo(deployment) // preserve existing resource requirements for main oauth container @@ -87,10 +106,11 @@ func ReconcileDeployment(ctx context.Context, client client.Client, deployment * } deployment.Spec.Template.ObjectMeta.Annotations[configHashAnnotation] = util.ComputeHash(configBytes) deployment.Spec.Template.Spec = corev1.PodSpec{ - AutomountServiceAccountToken: utilpointer.Bool(false), + AutomountServiceAccountToken: ptr.To(false), Containers: []corev1.Container{ - util.BuildContainer(oauthContainerMain(), buildOAuthContainerMain(image, noProxy)), - socks5ProxyContainer(socks5ProxyImage), + util.BuildContainer(oauthContainerMain(), buildOAuthContainerMain(image, auditWebhookRef, oauthNoProxy)), + util.BuildContainer(oauthContainerSocks5Proxy(), buildOAuthContainerSocks5Proxy(proxyImage)), + util.BuildContainer(oauthContainerHTTPProxy(), buildOAuthContainerHTTPProxy(proxyImage, proxyConfig, clusterNoProxy)), }, Volumes: []corev1.Volume{ util.BuildVolume(oauthVolumeConfig(), buildOAuthVolumeConfig), @@ -109,22 +129,51 @@ func ReconcileDeployment(ctx context.Context, client client.Client, deployment * }), util.BuildVolume(oauthVolumeWorkLogs(), buildOAuthVolumeWorkLogs), util.BuildVolume(oauthVolumeMasterCABundle(), buildOAuthVolumeMasterCABundle), - {Name: "admin-kubeconfig", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "service-network-admin-kubeconfig", DefaultMode: utilpointer.Int32(0640)}}}, - {Name: "konnectivity-proxy-cert", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: manifests.KonnectivityClientSecret("").Name, DefaultMode: 
utilpointer.Int32(0640)}}}, - {Name: "konnectivity-proxy-ca", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{LocalObjectReference: corev1.LocalObjectReference{Name: manifests.KonnectivityCAConfigMap("").Name}, DefaultMode: utilpointer.Int32(0640)}}}, + util.BuildVolume(oauthVolumeAuditConfig(), buildOAuthVolumeAuditConfig), + util.BuildVolume(oauthVolumeKonnectivityProxyClientCert(), buildOAuthVolumeKonnectivityProxyClientCert), + util.BuildVolume(oauthVolumeKonnectivityProxyTrustBundle(), buildOAuthVolumeKonnectivityProxyTrustBundle), }, } + + if auditConfig.Data[auditPolicyProfileMapKey] != string(configv1.NoneAuditProfileType) { + deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, corev1.Container{ + Name: "audit-logs", + Image: image, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: []string{"/bin/bash"}, + Args: []string{ + "-c", + kas.RenderAuditLogScript(fmt.Sprintf("%s/%s", volumeMounts.Path(oauthContainerMain().Name, oauthVolumeWorkLogs().Name), "audit.log")), + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("5m"), + corev1.ResourceMemory: resource.MustParse("10Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{{ + Name: oauthVolumeWorkLogs().Name, + MountPath: volumeMounts.Path(oauthContainerMain().Name, oauthVolumeWorkLogs().Name), + }}, + }) + } + + if auditWebhookRef != nil { + applyOauthAuditWebhookConfigFileVolume(&deployment.Spec.Template.Spec, auditWebhookRef) + } + deploymentConfig.ApplyTo(deployment) if len(identityProviders) > 0 { - _, volumeMountInfo, err := convertIdentityProviders(ctx, identityProviders, providerOverrides, client, deployment.Namespace) - if err != nil { - return err + _, volumeMountInfo, _ := ConvertIdentityProviders(ctx, identityProviders, providerOverrides, client, deployment.Namespace) + // Ignore the error here, since we don't want to fail the deployment if the identity 
providers are invalid + // A condition will be set on the HC to indicate the error + if len(volumeMountInfo.Volumes) > 0 { + deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volumeMountInfo.Volumes...) + deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMountInfo.VolumeMounts.ContainerMounts(oauthContainerMain().Name)...) } - deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volumeMountInfo.Volumes...) - deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMountInfo.VolumeMounts.ContainerMounts(oauthContainerMain().Name)...) } globalconfig.ApplyNamedCertificateMounts(oauthContainerMain().Name, oauthNamedCertificateMountPathPrefix, namedCertificates, &deployment.Spec.Template.Spec) - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec) + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), availabilityProberImage, &deployment.Spec.Template.Spec) return nil } @@ -134,27 +183,45 @@ func oauthContainerMain() *corev1.Container { } } -func buildOAuthContainerMain(image string, noProxy []string) func(c *corev1.Container) { +func buildOAuthContainerMain(image string, auditWebhookRef *corev1.LocalObjectReference, noProxy []string) func(c *corev1.Container) { return func(c *corev1.Container) { c.Image = image c.Args = []string{ "osinserver", fmt.Sprintf("--config=%s", path.Join(volumeMounts.Path(c.Name, oauthVolumeConfig().Name), OAuthServerConfigKey)), + "--audit-log-format=json", + "--audit-log-maxbackup=1", + "--audit-log-maxsize=10", + fmt.Sprintf("--audit-log-path=%s", path.Join(volumeMounts.Path(c.Name, oauthVolumeWorkLogs().Name), "audit.log")), + fmt.Sprintf("--audit-policy-file=%s", path.Join(volumeMounts.Path(c.Name, oauthVolumeAuditConfig().Name), 
auditPolicyConfigMapKey)), + } + + if auditWebhookRef != nil { + c.Args = append(c.Args, fmt.Sprintf("--audit-webhook-config-file=%s", oauthAuditWebhookConfigFile())) + c.Args = append(c.Args, "--audit-webhook-mode=batch") } + c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) c.WorkingDir = volumeMounts.Path(c.Name, oauthVolumeWorkLogs().Name) c.Env = []corev1.EnvVar{ + /** NOTE: + For identity providers that rely on HTTP/S, we use the http konnectivity proxy, since it + can route traffic through the customer-configured HTTP/S proxy. + For identity providers such as LDAP that do not use HTTP/S, we use the socks5 proxy. + LDAP uses the the ALL_PROXY variable, but not HTTP_PROXY or HTTPS_PROXY. + See: https://github.com/openshift/library-go/pull/1388 + **/ { Name: "HTTP_PROXY", - Value: fmt.Sprintf("socks5://127.0.0.1:%d", kas.KonnectivityServerLocalPort), + Value: fmt.Sprintf("http://127.0.0.1:%d", httpKonnectivityProxyPort), }, { Name: "HTTPS_PROXY", - Value: fmt.Sprintf("socks5://127.0.0.1:%d", kas.KonnectivityServerLocalPort), + Value: fmt.Sprintf("http://127.0.0.1:%d", httpKonnectivityProxyPort), }, { Name: "ALL_PROXY", - Value: fmt.Sprintf("socks5://127.0.0.1:%d", kas.KonnectivityServerLocalPort), + Value: fmt.Sprintf("socks5://127.0.0.1:%d", socks5KonnectivityProxyPort), }, { Name: "NO_PROXY", @@ -164,12 +231,74 @@ func buildOAuthContainerMain(image string, noProxy []string) func(c *corev1.Cont } } +func oauthContainerHTTPProxy() *corev1.Container { + return &corev1.Container{ + Name: "http-proxy", + } +} + +func buildOAuthContainerHTTPProxy(image string, proxyConfig *configv1.ProxySpec, noProxy string) func(c *corev1.Container) { + return func(c *corev1.Container) { + c.Image = image + c.Command = []string{"/usr/bin/control-plane-operator", "konnectivity-https-proxy"} + c.Args = []string{"run", fmt.Sprintf("--serving-port=%d", httpKonnectivityProxyPort), "--connect-directly-to-cloud-apis"} + if proxyConfig != nil { + c.Args = append(c.Args, 
"--http-proxy", proxyConfig.HTTPProxy) + c.Args = append(c.Args, "--https-proxy", proxyConfig.HTTPSProxy) + c.Args = append(c.Args, "--no-proxy", noProxy) + } + c.Env = []corev1.EnvVar{{ + Name: "KUBECONFIG", + Value: fmt.Sprintf("%s/kubeconfig", volumeMounts.Path(c.Name, oauthVolumeKubeconfig().Name)), + }} + c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) + } +} + +func oauthContainerSocks5Proxy() *corev1.Container { + return &corev1.Container{ + Name: "socks5-proxy", + } +} + +func buildOAuthContainerSocks5Proxy(image string) func(c *corev1.Container) { + return func(c *corev1.Container) { + c.Image = image + c.Command = []string{"/usr/bin/control-plane-operator", "konnectivity-socks5-proxy"} + c.Args = []string{"run", "--resolve-from-guest-cluster-dns=true", "--resolve-from-management-cluster-dns=true"} + c.Env = []corev1.EnvVar{{ + Name: "KUBECONFIG", + Value: fmt.Sprintf("%s/kubeconfig", volumeMounts.Path(c.Name, oauthVolumeKubeconfig().Name)), + }} + c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) + } +} + func oauthVolumeConfig() *corev1.Volume { return &corev1.Volume{ Name: "oauth-config", } } +func oauthVolumeAuditConfig() *corev1.Volume { + return &corev1.Volume{ + Name: "audit-config", + } +} + +func oauthAuditWebhookConfigFileVolume() *corev1.Volume { + return &corev1.Volume{ + Name: "oauth-audit-webhook", + } +} + +func buildOauthAuditWebhookConfigFileVolume(auditWebhookRef *corev1.LocalObjectReference) func(v *corev1.Volume) { + return func(v *corev1.Volume) { + v.Secret = &corev1.SecretVolumeSource{} + v.Secret.SecretName = auditWebhookRef.Name + } +} + func buildOAuthVolumeConfig(v *corev1.Volume) { v.ConfigMap = &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ @@ -178,6 +307,11 @@ func buildOAuthVolumeConfig(v *corev1.Volume) { } } +func buildOAuthVolumeAuditConfig(v *corev1.Volume) { + v.ConfigMap = &corev1.ConfigMapVolumeSource{} + v.ConfigMap.Name = manifests.OAuthAuditConfig("").Name +} + func 
oauthVolumeWorkLogs() *corev1.Volume { return &corev1.Volume{ Name: "logs", @@ -196,7 +330,7 @@ func oauthVolumeKubeconfig() *corev1.Volume { func buildOAuthVolumeKubeconfig(v *corev1.Volume) { v.Secret = &corev1.SecretVolumeSource{ - DefaultMode: utilpointer.Int32(0640), + DefaultMode: ptr.To[int32](0640), SecretName: manifests.KASServiceKubeconfigSecret("").Name, } } @@ -208,7 +342,7 @@ func oauthVolumeServingCert() *corev1.Volume { func buildOAuthVolumeServingCert(v *corev1.Volume) { v.Secret = &corev1.SecretVolumeSource{ - DefaultMode: utilpointer.Int32(0640), + DefaultMode: ptr.To[int32](0640), SecretName: manifests.OpenShiftOAuthServerCert("").Name, } } @@ -219,7 +353,7 @@ func oauthVolumeSessionSecret() *corev1.Volume { } func buildOAuthVolumeSessionSecret(v *corev1.Volume) { v.Secret = &corev1.SecretVolumeSource{ - DefaultMode: utilpointer.Int32(0640), + DefaultMode: ptr.To[int32](0640), SecretName: manifests.OAuthServerServiceSessionSecret("").Name, } } @@ -237,7 +371,7 @@ func BuildOAuthVolumeErrorTemplate(v *corev1.Volume, params *OAuthConfigParams) } v.Secret = &corev1.SecretVolumeSource{ - DefaultMode: utilpointer.Int32(0640), + DefaultMode: ptr.To[int32](0640), SecretName: errorTemplateSecret, } } @@ -256,7 +390,7 @@ func BuildOAuthVolumeLoginTemplate(v *corev1.Volume, params *OAuthConfigParams) } v.Secret = &corev1.SecretVolumeSource{ - DefaultMode: utilpointer.Int32(0640), + DefaultMode: ptr.To[int32](0640), SecretName: loginTemplateSecret, } } @@ -275,7 +409,7 @@ func BuildOAuthVolumeProvidersTemplate(v *corev1.Volume, params *OAuthConfigPara } v.Secret = &corev1.SecretVolumeSource{ - DefaultMode: utilpointer.Int32(0640), + DefaultMode: ptr.To[int32](0640), SecretName: providersTemplateSecret, } } @@ -291,28 +425,47 @@ func buildOAuthVolumeMasterCABundle(v *corev1.Volume) { v.ConfigMap.Name = manifests.OpenShiftOAuthMasterCABundle("").Name } -func socks5ProxyContainer(socks5ProxyImage string) corev1.Container { - c := corev1.Container{ - Name: 
socks5ProxyContainerName, - Image: socks5ProxyImage, - Command: []string{"/usr/bin/control-plane-operator", "konnectivity-socks5-proxy", "--resolve-from-guest-cluster-dns=true", "--resolve-from-management-cluster-dns=true"}, - Args: []string{"run"}, - Env: []corev1.EnvVar{{ - Name: "KUBECONFIG", - Value: "/etc/kubernetes/kubeconfig", - }}, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("10m"), - corev1.ResourceMemory: resource.MustParse("10Mi"), - }, - }, - VolumeMounts: []corev1.VolumeMount{ - {Name: "admin-kubeconfig", MountPath: "/etc/kubernetes"}, - {Name: "konnectivity-proxy-cert", MountPath: "/etc/konnectivity/proxy-client"}, - {Name: "konnectivity-proxy-ca", MountPath: "/etc/konnectivity/proxy-ca"}, - }, +func oauthVolumeKonnectivityProxyClientCert() *corev1.Volume { + return &corev1.Volume{ + Name: "konnectivity-proxy-cert", + } +} + +func buildOAuthVolumeKonnectivityProxyClientCert(v *corev1.Volume) { + v.Secret = &corev1.SecretVolumeSource{ + SecretName: manifests.KonnectivityClientSecret("").Name, + DefaultMode: ptr.To[int32](0640), + } +} + +func oauthVolumeKonnectivityProxyTrustBundle() *corev1.Volume { + return &corev1.Volume{ + Name: "konnectivity-proxy-ca", } +} + +func buildOAuthVolumeKonnectivityProxyTrustBundle(v *corev1.Volume) { + v.ConfigMap = &corev1.ConfigMapVolumeSource{DefaultMode: ptr.To[int32](0640)} + v.ConfigMap.Name = manifests.KonnectivityCAConfigMap("").Name +} + +func applyOauthAuditWebhookConfigFileVolume(podSpec *corev1.PodSpec, auditWebhookRef *corev1.LocalObjectReference) { + podSpec.Volumes = append(podSpec.Volumes, util.BuildVolume(oauthAuditWebhookConfigFileVolume(), buildOauthAuditWebhookConfigFileVolume(auditWebhookRef))) + var container *corev1.Container + for i, c := range podSpec.Containers { + if c.Name == oauthContainerMain().Name { + container = &podSpec.Containers[i] + break + } + } + if container == nil { + panic("main oauth openshift container 
oauth-server not found in spec") + } + container.VolumeMounts = append(container.VolumeMounts, + oauthAuditWebhookConfigFileVolumeMount.ContainerMounts(oauthContainerMain().Name)...) +} - return c +func oauthAuditWebhookConfigFile() string { + cfgDir := oauthAuditWebhookConfigFileVolumeMount.Path(oauthContainerMain().Name, oauthAuditWebhookConfigFileVolume().Name) + return path.Join(cfgDir, hyperv1.AuditWebhookKubeconfigKey) } diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment_test.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment_test.go new file mode 100644 index 0000000000..7d6aec26d9 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/deployment_test.go @@ -0,0 +1,81 @@ +package oauth + +import ( + "context" + "testing" + + . "github.com/onsi/gomega" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/api" + "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests" + hyperapi "github.com/openshift/hypershift/support/api" + "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/testutil" + "github.com/openshift/hypershift/support/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +// Ensure certain deployment fields do not get set +func TestReconcileOauthDeploymentNoChanges(t *testing.T) { + + // Setup expected values that are universal + imageName := "oauthImage" + + // Setup hypershift hosted control plane. 
+ targetNamespace := "test" + oauthDeployment := manifests.OAuthDeployment(targetNamespace) + hcp := &hyperv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hcp", + Namespace: targetNamespace, + }, + } + hcp.Name = "name" + hcp.Namespace = "namespace" + ownerRef := config.OwnerRefFrom(hcp) + webhookRef := &corev1.LocalObjectReference{ + Name: "test-webhook-audit-secret", + } + + testCases := []struct { + cm corev1.ConfigMap + auditCM corev1.ConfigMap + deploymentConfig config.DeploymentConfig + serverParams OAuthServerParams + configParams OAuthConfigParams + }{ + // empty deployment config + { + cm: corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-oauth-config", + Namespace: targetNamespace, + }, + Data: map[string]string{"config.yaml": "test-data"}, + }, + deploymentConfig: config.DeploymentConfig{}, + serverParams: OAuthServerParams{ + AvailabilityProberImage: "test-availability-image", + ProxyImage: "test-socks-5-proxy-image", + AuditWebhookRef: webhookRef, + }, + }, + } + for _, tc := range testCases { + g := NewGomegaWithT(t) + ctx := context.Background() + fakeClient := fake.NewClientBuilder().WithScheme(hyperapi.Scheme).Build() + oauthDeployment.Spec.MinReadySeconds = 60 + expectedMinReadySeconds := oauthDeployment.Spec.MinReadySeconds + err := ReconcileDeployment(ctx, fakeClient, oauthDeployment, tc.serverParams.AuditWebhookRef, ownerRef, &tc.cm, &tc.auditCM, imageName, tc.deploymentConfig, tc.serverParams.IdentityProviders(), tc.serverParams.OauthConfigOverrides, + tc.serverParams.AvailabilityProberImage, tc.serverParams.NamedCertificates(), tc.serverParams.ProxyImage, nil, "", tc.serverParams.OAuthNoProxy, &tc.configParams, hyperv1.IBMCloudPlatform) + g.Expect(err).To(BeNil()) + g.Expect(expectedMinReadySeconds).To(Equal(oauthDeployment.Spec.MinReadySeconds)) + + deploymentYaml, err := util.SerializeResource(oauthDeployment, api.Scheme) + g.Expect(err).To(BeNil()) + testutil.CompareWithFixture(t, deploymentYaml) + } +} diff 
--git a/control-plane-operator/controllers/hostedcontrolplane/oauth/files/audit-policy.yaml b/control-plane-operator/controllers/hostedcontrolplane/oauth/files/audit-policy.yaml new file mode 100644 index 0000000000..df73ea5dfa --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/files/audit-policy.yaml @@ -0,0 +1,18 @@ +# sourced from https://github.com/openshift/cluster-authentication-operator/blob/master/bindata/oauth-openshift/audit-policy.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: audit + namespace: openshift-authentication +data: + audit.yaml: | + apiVersion: audit.k8s.io/v1 + kind: Policy + rules: + - level: None + nonResourceURLs: + - "/healthz*" + - "/logs" + - "/metrics" + - "/version" + - level: Metadata diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/idp_convert.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/idp_convert.go index 61d1adcc7d..50af42c49b 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oauth/idp_convert.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/idp_convert.go @@ -15,17 +15,23 @@ import ( configv1 "github.com/openshift/api/config/v1" osinv1 "github.com/openshift/api/osin/v1" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + kas "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" + manifests "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/support/konnectivityproxy" + supportproxy "github.com/openshift/hypershift/support/proxy" + "github.com/openshift/hypershift/support/util" + "golang.org/x/net/http/httpproxy" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/cache" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/net" + clientcmd 
"k8s.io/client-go/tools/clientcmd" "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" crclient "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/openshift/hypershift/support/util" ) const ( @@ -77,7 +83,7 @@ func (i *IDPVolumeMountInfo) SecretPath(index int, secretName, field, key string return path.Join(i.VolumeMounts[i.Container][v.Name], key) } -func convertIdentityProviders(ctx context.Context, identityProviders []configv1.IdentityProvider, providerOverrides map[string]*ConfigOverride, kclient crclient.Client, namespace string) ([]osinv1.IdentityProvider, *IDPVolumeMountInfo, error) { +func ConvertIdentityProviders(ctx context.Context, identityProviders []configv1.IdentityProvider, providerOverrides map[string]*ConfigOverride, kclient crclient.Client, namespace string) ([]osinv1.IdentityProvider, *IDPVolumeMountInfo, error) { converted := make([]osinv1.IdentityProvider, 0, len(identityProviders)) errs := []error{} volumeMountInfo := &IDPVolumeMountInfo{ @@ -92,7 +98,7 @@ func convertIdentityProviders(ctx context.Context, identityProviders []configv1. 
if _, ok := providerOverrides[idp.Name]; ok { providerConfigOverride = providerOverrides[idp.Name] } - data, err := convertProviderConfigToIDPData(ctx, &idp.IdentityProviderConfig, providerConfigOverride, i, volumeMountInfo, kclient, namespace) + data, err := convertProviderConfigToIDPData(ctx, &idp.IdentityProviderConfig, providerConfigOverride, i, volumeMountInfo, kclient, namespace, false) if err != nil { errs = append(errs, fmt.Errorf("failed to apply IDP %s config: %v", idp.Name, err)) continue @@ -135,6 +141,7 @@ func convertProviderConfigToIDPData( idpVolumeMounts *IDPVolumeMountInfo, kclient crclient.Client, namespace string, + skipKonnectivityDialer bool, ) (*idpData, error) { const missingProviderFmt string = "type %s was specified, but its configuration is missing" @@ -345,7 +352,7 @@ func convertProviderConfigToIDPData( openIDProvider.URLs = configOverride.URLs openIDProvider.Claims = configOverride.Claims } else { - urls, err := discoverOpenIDURLs(ctx, kclient, openIDConfig.Issuer, corev1.ServiceAccountRootCAKey, namespace, openIDConfig.CA) + urls, err := discoverOpenIDURLs(ctx, kclient, openIDConfig.Issuer, corev1.ServiceAccountRootCAKey, namespace, openIDConfig.CA, skipKonnectivityDialer) if err != nil { return nil, err } @@ -385,6 +392,7 @@ func convertProviderConfigToIDPData( namespace, openIDConfig.CA, openIDConfig.ClientSecret, + skipKonnectivityDialer, ) if err != nil { return nil, fmt.Errorf("error attempting password grant flow: %v", err) @@ -420,9 +428,80 @@ func convertProviderConfigToIDPData( return data, nil } +const ( + konnectivityClientDataCertKey = "tls.crt" + konnectivityClientDataKey = "tls.key" + konnectivityCADataKey = "ca.crt" + kubeconfigDataKey = "kubeconfig" +) + +func buildKonnectivityDialer(ctx context.Context, kclient crclient.Client, namespace string) (konnectivityproxy.ProxyDialer, error) { + konnectivityClientSecret := manifests.KonnectivityClientSecret(namespace) + if err := kclient.Get(ctx, 
crclient.ObjectKeyFromObject(konnectivityClientSecret), konnectivityClientSecret); err != nil { + return nil, fmt.Errorf("failed to get konnectivity client secret: %w", err) + } + konnectivityClientCert, exists := konnectivityClientSecret.Data[konnectivityClientDataCertKey] + if !exists || len(konnectivityClientCert) == 0 { + return nil, errors.New("konnectivity client secret has not been populated") + } + + konnectivityClientCertKey, exists := konnectivityClientSecret.Data[konnectivityClientDataKey] + if !exists || len(konnectivityClientCertKey) == 0 { + return nil, errors.New("konnectivity client secret key has not been populated") + } + + konnectivityCAConfigMap := manifests.KonnectivityCAConfigMap(namespace) + if err := kclient.Get(ctx, crclient.ObjectKeyFromObject(konnectivityCAConfigMap), konnectivityCAConfigMap); err != nil { + return nil, fmt.Errorf("failed to get konnectivity CA config map: %w", err) + } + konnectivityCA, exists := konnectivityCAConfigMap.Data[konnectivityCADataKey] + if !exists || len(konnectivityCA) == 0 { + return nil, errors.New("konnectivity CA config map has not been populated") + } + + kubeconfigSecret := manifests.KASServiceKubeconfigSecret(namespace) + if err := kclient.Get(ctx, crclient.ObjectKeyFromObject(kubeconfigSecret), kubeconfigSecret); err != nil { + return nil, fmt.Errorf("failed to get kubeconfig secret: %w", err) + } + kubeconfigData, exists := kubeconfigSecret.Data[kubeconfigDataKey] + if !exists || len(kubeconfigData) == 0 { + return nil, fmt.Errorf("kubeconfig secret has not been populated") + } + + guestClusterConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeconfigSecret.Data["kubeconfig"]) + if err != nil { + return nil, fmt.Errorf("failed to create REST config from kubeconfig: %w", err) + } + + guestClusterClient, err := crclient.New(guestClusterConfig, crclient.Options{}) + if err != nil { + return nil, fmt.Errorf("failed to create client for guest cluster: %w", err) + } + + opts := 
konnectivityproxy.Options{ + CABytes: []byte(konnectivityCA), + ClientCertBytes: konnectivityClientCert, + ClientKeyBytes: konnectivityClientCertKey, + KonnectivityHost: manifests.KonnectivityServerLocalService("").Name, + KonnectivityPort: kas.KonnectivityServerLocalPort, + ConnectDirectlyToCloudAPIs: false, + ResolveFromManagementClusterDNS: true, + ResolveFromGuestClusterDNS: true, + ResolveBeforeDial: true, + DisableResolver: false, + Client: guestClusterClient, + Log: ctrl.LoggerFrom(ctx), + } + konnectivityDialer, err := konnectivityproxy.NewKonnectivityDialer(opts) + if err != nil { + return nil, err + } + return konnectivityDialer, nil +} + // discoverOpenIDURLs retrieves basic information about an OIDC server with hostname // given by the `issuer` argument -func discoverOpenIDURLs(ctx context.Context, kclient crclient.Client, issuer, key, namespace string, ca configv1.ConfigMapNameReference) (*osinv1.OpenIDURLs, error) { +func discoverOpenIDURLs(ctx context.Context, kclient crclient.Client, issuer, key, namespace string, ca configv1.ConfigMapNameReference, skipKonnectivityDialer bool) (*osinv1.OpenIDURLs, error) { issuer = strings.TrimRight(issuer, "/") // TODO make impossible via validation and remove wellKnown := issuer + "/.well-known/openid-configuration" @@ -440,7 +519,7 @@ func discoverOpenIDURLs(ctx context.Context, kclient crclient.Client, issuer, ke } req = req.WithContext(reqCtx) - rt, err := transportForCARef(ctx, kclient, namespace, ca.Name, key) + rt, err := transportForCARef(ctx, kclient, namespace, ca.Name, key, skipKonnectivityDialer) if err != nil { return nil, err } @@ -497,6 +576,7 @@ func checkOIDCPasswordGrantFlow(ctx context.Context, namespace string, caRererence configv1.ConfigMapNameReference, clientSecretReference configv1.SecretNameReference, + skipKonnectivityDialer bool, ) (bool, error) { log := ctrl.LoggerFrom(ctx) secret := &corev1.Secret{ @@ -522,7 +602,7 @@ func checkOIDCPasswordGrantFlow(ctx context.Context, return false, 
fmt.Errorf("the referenced secret does not contain a value for the 'clientSecret' key") } - transport, err := transportForCARef(ctx, kclient, namespace, caRererence.Name, corev1.ServiceAccountRootCAKey) + transport, err := transportForCARef(ctx, kclient, namespace, caRererence.Name, corev1.ServiceAccountRootCAKey, skipKonnectivityDialer) if err != nil { return false, fmt.Errorf("couldn't get a transport for the referenced CA: %v", err) } @@ -596,37 +676,101 @@ func isValidURL(rawurl string, optional bool) bool { return u.Scheme == "https" && len(u.Host) > 0 && len(u.Fragment) == 0 } -func transportForCARef(ctx context.Context, kclient crclient.Client, namespace, name, key string) (http.RoundTripper, error) { +func transportForCARef(ctx context.Context, kclient crclient.Client, namespace, caName, caKey string, skipKonnectivityDialer bool) (http.RoundTripper, error) { + var konnectivityDialer konnectivityproxy.ProxyDialer + var userProxyConfig *httpproxy.Config + var userProxyTrustedCA string + // copy default transport transport := net.SetTransportDefaults(&http.Transport{ TLSClientConfig: &tls.Config{}, }) + roots := x509.NewCertPool() + + if !skipKonnectivityDialer { + var err error + // Build dialer for konnectivity. + konnectivityDialer, err = buildKonnectivityDialer(ctx, kclient, namespace) + if err != nil { + return nil, fmt.Errorf("failed to build konnectivity dialer: %w", err) + } + + // Fetch user Proxy info. 
+ hcpList := &hyperv1.HostedControlPlaneList{} + if err := kclient.List(ctx, hcpList, crclient.InNamespace(namespace)); err != nil { + return nil, fmt.Errorf("failed to get hosted control plane list: %w", err) + } + if len(hcpList.Items) != 1 { + return nil, fmt.Errorf("expected one hosted control plane, got %d", len(hcpList.Items)) + } + hcp := hcpList.Items[0] + + if hcp.Spec.Configuration != nil { + if proxy := hcp.Spec.Configuration.Proxy; proxy != nil { + userProxyConfig = &httpproxy.Config{ + HTTPProxy: proxy.HTTPProxy, + HTTPSProxy: proxy.HTTPSProxy, + NoProxy: supportproxy.DefaultNoProxy(&hcp), + } - if len(name) == 0 { + if proxy.TrustedCA.Name != "" { + proxyTrustedCAConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: proxy.TrustedCA.Name, + Namespace: namespace, + }, + } + if err = kclient.Get(ctx, crclient.ObjectKeyFromObject(proxyTrustedCAConfigMap), proxyTrustedCAConfigMap); err != nil { + return nil, fmt.Errorf("failed to get proxy trusted CA config map: %w", err) + } + userProxyTrustedCA = proxyTrustedCAConfigMap.Data["ca-bundle.crt"] + } + } + } + } + + // Set konnectivity dialer values for transport. + if konnectivityDialer != nil { + transport.DialContext = konnectivityDialer.DialContext + } + if userProxyConfig != nil { + userProxyFunc := userProxyConfig.ProxyFunc() + transport.Proxy = func(req *http.Request) (*url.URL, error) { + return userProxyFunc(req.URL) + } + } + if userProxyTrustedCA != "" { + if ok := roots.AppendCertsFromPEM([]byte(userProxyTrustedCA)); !ok { + return nil, fmt.Errorf("error appending proxy trusted CA to transport RootCAs") + } + transport.TLSClientConfig.RootCAs = roots + } + + if len(caName) == 0 { return transport, nil } + // Add CA to transport RootCAs. 
cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: caName, Namespace: namespace, }, } if err := kclient.Get(ctx, crclient.ObjectKeyFromObject(cm), cm); err != nil { return nil, err } - caData := []byte(cm.Data[key]) + caData := []byte(cm.Data[caKey]) if len(caData) == 0 { - caData = cm.BinaryData[key] + caData = cm.BinaryData[caKey] } if len(caData) == 0 { - return nil, fmt.Errorf("config map %s/%s has no ca data at key %s", namespace, name, key) + return nil, fmt.Errorf("config map %s/%s has no ca data at key %s", namespace, caName, caKey) } - roots := x509.NewCertPool() if ok := roots.AppendCertsFromPEM(caData); !ok { // avoid logging data that could contain keys - return nil, errors.New("error loading cert pool from ca data") + return nil, errors.New("error appending ca to transport RootCAs") } transport.TLSClientConfig.RootCAs = roots return transport, nil diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/idp_convert_test.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/idp_convert_test.go index 30ffeea1c3..4c64bcf48c 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oauth/idp_convert_test.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/idp_convert_test.go @@ -2,15 +2,22 @@ package oauth import ( "context" + "crypto/x509" + "encoding/base64" "fmt" + "net/http" + "net/url" "testing" . 
"github.com/onsi/gomega" configv1 "github.com/openshift/api/config/v1" osinv1 "github.com/openshift/api/osin/v1" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" "github.com/openshift/hypershift/support/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -174,7 +181,7 @@ func TestOpenIDProviderConversion(t *testing.T) { t.Run(tc.name, func(t *testing.T) { client := fake.NewClientBuilder().WithObjects(idpSecret).Build() outIDP, err := convertProviderConfigToIDPData(context.TODO(), - tc.idp, nil, 0, volumeMountInfo, client, namespace) + tc.idp, nil, 0, volumeMountInfo, client, namespace, true) g := NewWithT(t) if tc.outErr != nil { g.Expect(err).To(Equal(tc.outErr)) @@ -185,3 +192,200 @@ func TestOpenIDProviderConversion(t *testing.T) { }) } } + +func TestTransportForCARef(t *testing.T) { + namespace := "test" + + testCases := []struct { + name string + hcp *hyperv1.HostedControlPlane + requestToURL string + expectedProxyRequestURL string + }{ + { + name: "When no proxy configuration is provided, the transport should not be modified", + hcp: &hyperv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hcp-test", + Namespace: namespace, + }, + Spec: hyperv1.HostedControlPlaneSpec{}, + }, + requestToURL: "https://test.com", + expectedProxyRequestURL: "", + }, + { + name: "When proxy configuration is provided, the transport should use proxy", + hcp: &hyperv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hcp-test", + Namespace: namespace, + }, + Spec: hyperv1.HostedControlPlaneSpec{ + Configuration: &hyperv1.ClusterConfiguration{ + Proxy: &configv1.ProxySpec{ + HTTPProxy: "", + HTTPSProxy: "https://10.0.0.1", + NoProxy: "", + ReadinessEndpoints: []string{}, + TrustedCA: configv1.ConfigMapNameReference{ + Name: 
"proxyTrustedCA", + }, + }, + }, + }, + }, + requestToURL: "https://test.com", + expectedProxyRequestURL: "https://10.0.0.1", + }, + { + name: "When proxy configuration is provided and request is to ignored url, the transport should not use proxy", + hcp: &hyperv1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hcp-test", + Namespace: namespace, + }, + Spec: hyperv1.HostedControlPlaneSpec{ + Configuration: &hyperv1.ClusterConfiguration{ + Proxy: &configv1.ProxySpec{ + HTTPProxy: "", + HTTPSProxy: "https://10.0.0.1", + NoProxy: "workload.svc", + ReadinessEndpoints: []string{}, + TrustedCA: configv1.ConfigMapNameReference{ + Name: "proxyTrustedCA", + }, + }, + }, + }, + }, + requestToURL: "workload.svc", + expectedProxyRequestURL: "", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + // Generic fake base64 encoded certificate data. + fakeCertCAData := []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURQRENDQWlTZ0F3SUJBZ0lJTUdGUkF2TUlYL013RFFZSktvWklodmNOQVFFTEJRQXdKakVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1SQXdEZ1lEVlFRREV3ZHliMjkwTFdOaE1CNFhEVEkwTURReE5qRTJNemcwTmxvWApEVE0wTURReE5ERTJNemcwTmxvd0pqRVNNQkFHQTFVRUN4TUpiM0JsYm5Ob2FXWjBNUkF3RGdZRFZRUURFd2R5CmIyOTBMV05oTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUF5K01xcmxQbDZpL1kKeXdHaU1lOUZETDRsZFdDMk1TSkRPbGZaci9pbStoeVQzcTBHYnRaZmltR1dWMEtLWm1JMHpveDhodzZKZnR0dAp4bjZLY0N2aEN0ZnBQUWpZa0V2a0NjS2V6dmJYdGt3bjkrTjhlNzR6ejkzYWlWWDIvK3FYOWVBeUdvdU1OYWxFCmk2UDdieUowa3Q5M20vbEYrNWNlQ1NJTS9qTER0VTVEOHJHSUtMbmVTNFZGRHNYckgvL0VDa1R5c3NYUUF5WGcKd3ZwOVBKZlJyK2ZtZk11N2xkOE52TTBucExaQldkUjNrV2QvVzFGZVlSV3JqbmtKQ1ZUM0I4WGZzK2p6M1pCTgpnWU9pdHR3dytLZmVGNWlnRVQ1RGVrMTdncUJVcTZrY3dzQm1VeTYzS0JVa0pMSnB6SExGSVlVVjgyMk9KeFdLCkc2N0EyZUpsNHdJREFRQUJvMjR3YkRBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pCSkJnTlZIUTRFUWdSQTBCeElOMVh2MTZiNWdVdXM0anA2Y2Q2NWorcnFkNXluTHlRVEdWNlVQYUxpV1k3RQpsWHVXTTQvSUsvTnRKSzBPdmJObmJhREFyNHR1ZXFSUW1DZ2w1akFOQmdrcWhraUc5dzBCQ
VFzRkFBT0NBUUVBClVUemp4TldsQ21FaWZ1UmptN3F5K1oxcVRyeVU2V0lmblhlMm1xd1cvWmtva3ZsM0lmcE56czNzWUY4RGNnR3gKcnNZL3BiaFJIN2RtLzdDYkNBUFozSEZBc1dGWmswZEIwd1I2dGVhWXdtbDQvSmZSU0JzZ3JwQ2JmQUJ0MWNVcQpKR1ZhQ3AvQ2ZOcUp0SW9QNitBUldpbnRLQ2xid0JVSS9yUmhvWnVHSEVQZURlc3NTaFpwZmUxK2FDRmFYYVQrCkgzeUk1Qzl2OW5hRDhVdWkxU1J3Vm43SlQ4SVJuSHhtdHY3eUlZL05SL1NWbjBPTkxGbHN3VFREa2o2RVR6TTAKTG8zMGQ4ZmwwSjJ3YmtEekxDc3ozU0lRRjNrL2huR2tIdW5UWUhwWWF0dUZWenZyOVNlSGkrS1lkNllCb25JNApKSWFtZEZsTmZtM3dpS3FtWWZ3SEVnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJRCt6Q0NBdU9nQXdJQkFnSUlGa0hGcFZlVG1XUXdEUVlKS29aSWh2Y05BUUVMQlFBd0pqRVNNQkFHQTFVRQpDeE1KYjNCbGJuTm9hV1owTVJBd0RnWURWUVFERXdkeWIyOTBMV05oTUI0WERUSTBNRFF4TmpFMk16ZzBObG9YCkRUSTFNRFF4TmpFMk16ZzFNMW93TURFU01CQUdBMVVFQ2hNSmIzQmxibk5vYVdaME1Sb3dHQVlEVlFRREV4RnYKY0dWdWMyaHBablF0YVc1bmNtVnpjekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQgpBTFhQYktuTkhRU3pvWDJPWHVETjJWMmRBKzdabzRPTjNERFduZVNWeHZlUXZLRFNIZXVMQUdwb3dheHFPbEd0CnpxRVJWelQzaFQ2NThjd0p5d3VwczFteXdJQ290dE5mZzdadk9NQ0pmZUF2MDBuNmFXWW1JdlhCMjhEWVNRaEkKbnpqb3kyWTNwZkVha2c5VWo0VDl6SkFmaGd4RktqRzZMZ2NBSlgrTk5Zd0tScWxlN1g4SkV6WkhCVmpLOGJILwpWMEdoUDFGS3l5V1JGQ2FkWFVTTTM1NEFIUDJqME0wRENEbXR1bytHR1FGWmlDdnVnQlB6b2ZsUjF5MEpHRlk0ClFiaDBzYVZrRmFEVDd3OEd6Rzk5MHBldXhRZ2xXblF4bUw2ZUwveXlZTmk1TTdONkFZYmZaQWJxTWtmZ1NjeS8KWFpwOCtJRTVMLzVNK2g1aWxDUnBGOVVDQXdFQUFhT0NBU0V3Z2dFZE1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZApCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0RBZ1lJS3dZQkJRVUhBd0V3REFZRFZSMFRBUUgvQkFJd0FEQkpCZ05WCkhRNEVRZ1JBbUlrWTFKR2Z2c25jbkdKOVQvZkl6WmRSeXhObUNmWHJpR2wwdjVuVnlmSTkyM1hrRTNLaHd6NXYKczFrOTBYbkZDM2xmRitETUNocFIySk5Nb2R0c3F6QkxCZ05WSFNNRVJEQkNnRURRSEVnM1ZlL1hwdm1CUzZ6aQpPbnB4M3JtUDZ1cDNuS2N2SkJNWlhwUTlvdUpaanNTVmU1WXpqOGdyODIwa3JRNjlzMmR0b01DdmkyNTZwRkNZCktDWG1NRVlHQTFVZEVRUS9NRDJDT3lvdVlYQndjeTV6YTNWNmJtVjBjeTF0WjIxMExtTnBMbWg1Y0dWeWMyaHAKWm5RdVpHVjJZMngxYzNSbGNpNXZjR1Z1YzJocFpuUXVZMjl0TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFCQQpnU1pMdFJZZGJoNG1JVXgxYWxyVktlR2VRN2lMeWlwL1pBd1kxc2hYTk05Z
WEwZ1NLcStGQ1RHS1hmcmZlbVdrCmZRR25LNys0aTIzOUZtN0pmaE1pcU5TZ2R5bVR2djhDYlcxNjFNOVcrTkZoOEV1N2h1V2dMdzBEZHgwMys5ZTAKajFsa0dMODcvcWM2cmM0WmVYRWM5dVV3cWdrK3dSWktnbDMzblNxem42TlNuQ1BTM2hXSEFRVkRsd1NlalVoYQpJcUtxb0kzWkhsY3hybnBNWDM4Y01JYTdOL2svc1hVNVZndkxzYXN6ZjVpUWZOWlk1ZkliT0t3YjVqY1hwRWhYCldoQU84dFkyaWJBQ3BWWHlYYlI1K0VCajF4UDM3SHMvVHNaV3lsWGFJYWwyZ083QWRqVGlwenVwSTBkYmFUakMKRHJnQmFJbjZWWkExQU4zSzlVdmQKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=") + fakeClientCertData := []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUR0ekNDQXArZ0F3SUJBZ0lJZGRxdkFLYUpBNVl3RFFZSktvWklodmNOQVFFTEJRQXdOakVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1TQXdIZ1lEVlFRREV4ZGhaRzFwYmkxcmRXSmxZMjl1Wm1sbkxYTnBaMjVsY2pBZQpGdzB5TkRBME1UWXhOak00TkRsYUZ3MHlOVEEwTVRZeE5qTTROVEJhTURBeEZ6QVZCZ05WQkFvVERuTjVjM1JsCmJUcHRZWE4wWlhKek1SVXdFd1lEVlFRREV3eHplWE4wWlcwNllXUnRhVzR3Z2dFaU1BMEdDU3FHU0liM0RRRUIKQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUN4elMvMnlyNTkvQXFqalVOTlR5TW5tSGkvWkcyZW96RjE5eUdtUWtDTQpFcjFSMG9xb2V2OGtoWHNTalduK2FsUUoyaW85ekY2eGo5SjF5aDBmbVFMMzhQbm1NNStPVzYyM0FmbnNQbEI4CjlTRlhJQkdZS1JQaEVZMXYzSi91YVpsb0lDcWRnaHk0VzdFRVVVSXVNK2dLK1ZKdUV4SUhqZnJKMFdjMmRiSysKSk5OWDU3YW9wQjF2ZTFwTktIZkcrN2lCelMyejI2Y3dIUXdsQnFyMjA3MkhadUVzSG5XWXc5ekp6L3dUNm1CdgpNY3ZHeEl6aTRaSVBqVWlWUW5XanJMQ0JONGRGY2dUSUozQW5oSlJlQ1AzNiswelVjcFp3NTVMakp6bFJ5bWRRCk82TVNvVFY0ZUE1elhReTJvVS92T3IzMGthZEpHeng4YlRublFkRjBibkhEQWdNQkFBR2pnYzR3Z2Nzd0RnWUQKVlIwUEFRSC9CQVFEQWdXZ01CTUdBMVVkSlFRTU1Bb0dDQ3NHQVFVRkJ3TUNNQXdHQTFVZEV3RUIvd1FDTUFBdwpTUVlEVlIwT0JFSUVRRENSMW8rb0FrRVV4RUZUS3A0eGtvQnczM2FrOEQvNXJjYkxXR1EwTTJpTVV3Wko5eUVrCmVkMmY0cGdjSkt5ODRCeWVpc0s0UXJNcEJ5VnNZRnhtMGswd1N3WURWUjBqQkVRd1FvQkFWeUtBOTlqSTduSGgKa3ZEM3hCeVJqTWpsWG9MTjZoS0VUTnYrOXVwRTl4RjYxTlcyekNXamUyNURSZi9pd1lOUFV2QXBtTjFJRWhQawpTYTE2L1BCZGJ6QU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FRRUFBNFdWaGUyb294MG1sNmJmWlN3NmtXQXd4VDZZCmh6bG84WGdRM3g3a25hR0pQMVNsQVJYQlA3cEl0cEsvRzk3VW4xTTNRYkcwcWF6S2VZcFNtTWE4cGRPL3lDT0gKNTdkbVZqOElPNk9tamtpT014QUZaWEJkS01SRUNGMFpYKzJadUo1WW9iL0QzVmQzbGxVZ0tNR010TE9GWW1Ubgo5MGFndldXOVRkWG
ZmTHBER2pRTjJFUWVGVmtkQU5tNU9DRUFiOEt2bS82THc0TldNdzdHUFVwTVl0eElGeVlvCi9oSGhUWUFLRGpvckJkQkpobFBMd1VXeUN6ZFBvRmZUdUpzYzZvSFE0K3FPREY1YkI4UHNkM1pRK0hzT3VTSEcKNXNlU0F4ek5vRjNLY21iUlF2K3JRcnVxVEs3aHd5VStkdjFnMjhYUlBMQitpRm9lY1lIVUJQR1IyQT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K") + fakeClientKeyData := []byte("LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBc2MwdjlzcStmZndLbzQxRFRVOGpKNWg0djJSdG5xTXhkZmNocGtKQWpCSzlVZEtLCnFIci9KSVY3RW8xcC9tcFVDZG9xUGN4ZXNZL1NkY29kSDVrQzkvRDU1ak9mamx1dHR3SDU3RDVRZlBVaFZ5QVIKbUNrVDRSR05iOXlmN21tWmFDQXFuWUljdUZ1eEJGRkNMalBvQ3ZsU2JoTVNCNDM2eWRGbk5uV3l2aVRUVitlMgpxS1FkYjN0YVRTaDN4dnU0Z2MwdHM5dW5NQjBNSlFhcTl0TzloMmJoTEI1MW1NUGN5Yy84RStwZ2J6SEx4c1NNCjR1R1NENDFJbFVKMW82eXdnVGVIUlhJRXlDZHdKNFNVWGdqOSt2dE0xSEtXY09lUzR5YzVVY3BuVUR1akVxRTEKZUhnT2MxME10cUZQN3pxOTlKR25TUnM4ZkcwNTUwSFJkRzV4d3dJREFRQUJBb0lCQUVUQXNURmZTTFh5eGpKawpKNGczZDhLUjVPOHRhRzRWY01USzRVb25DRXFoM0c5TldLeTVrdnVPV2Y3Y2pBWURHNmdMb3BYdTl4YjJKRTNECjcrc09BZVhhV3VlM1FwV0x3ZXFvYXZuOVJxWnJLNDlES1VxTFo5SjZOUlR5WFMyVnkrcEZ0ZlRlSVRqd3k4eDkKbDNmQ1BwSXZ3cjRweGFrQ0w5M21pV0MzdG54cm9BTVluU0RlNVNnazFCZU1vd2pDZDh1T3BGODFaVjZ1ZUVlVgp1TGdNNWQ5ZCt6MWovYVc0M01PUFNKSkcralM2WVROM3lpSDVZSWkrUjdHU21tNjdQM29NZlYxUUdWbDJuMmdxCjBnMEkrZ2I5akFXY3lENHVNNlF2Sisya0o2czR3c2dsZTB5a1RpZEJoeDRHQUFBSmpFdFFNdkR0aG54NGtZM1QKTTNoVEk1RUNnWUVBMS9hWXk4dFZyWjVkdHRLS0FCS21ieFMxL2tCUmVHT1JySVBIZUdGWmdIK2s0NWhCTHlQNQpkcVNSZVd2TCs1SEFRdWt2WFYxUXpmczBwQTRSVGNyNnZ1ZXZsSFVjY2tpVWlJNkxzcjNRSm0ydVJkSVlveWZvClRHaHVFalRwZ2ErVlhlb1ozTVVFSUliWHcvNHFzTERITmYvY3JZWVA0NXh3QWxUeWxLam1ycFVDZ1lFQTBzTjQKcCtVTFRNWDJxZGJjZ2NySXpqUTdzYXBHRWhHVThMdFBQSGM4anJzbHEwWS85WnF2L091NVdOYjBqQ1RSSTR2RQpzRGlkYUlQMmg0Nit3MEpuVkdjSzVPQVVDNVZBcXN3QkZQSExPZ1pUS0xlaFdmTS9vQ2pLSDJnOHRZeWFhUnlRCnhGZzNvajdmc1FRaWc2QVBQS3NqL2U3Q25yMDFRdnVLNHJ0c0FQY0NnWUVBa1I4RlNCVG9DeFliTlVvL0w1TlkKd2RZeUFac280L1JNcEplZEI3aXJFeDB6S1RsYnZCaTVmczlSYmoxUXdra0w0Q3FnQ0dZM2NXTDMyYklXVUtjdwpYZTZFWHdkZlNUQ2FselRxalA3ZUM2U3ljZnFmVWF2MGZydkNFM3Y0Mll1cW5JUStRc3NsWGRJZTFYWkxLNVp2CkYw
dEsrRlBaQTROUkJWQWQvbVdOTmcwQ2dZQVpvWS93eXhmK3RDeDFKeDRWNHJWYzdsazhGL3NCZzRYYmFNd1EKREdnZTYzOS9Qc0hVZW9WZ2VzSkZuWTZMNUlaU2psTFRJMjl4SUd0QXZRbFI4YWRqU2t5MjNORlRQMGxuKy9zOQpzdElHTW5LMmh1NW1aQUNlMTVjTkRyNGpUZ0FSUEZvV3Bxdk5YVndTeU8veGxldUVjME9qUkFBREVmdUNNOWtHCkRjanFyUUtCZ1FDeU1JbGNQL3NRODh3eWI0NkJQMkJjVFc3cHRPYW9LOXRLL0REQjBFenAwY2VydkkvdXQ5cEMKZmFISFNJVVVGQ3hHelp3YWtDL3hCYTRnajNXcEtJSTN3YkU5WG0wUWRiMDNRRmRydXBtQUVDOUFWeXpabkZhcgplMkpRUUtWUzZZVHZjbitKZzYvQ1gxeUF0NHM4OXFJU2hwWGQ0c1ZrNENleWhHTUJqNXd1WGc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=") + + // CA ConfigMap used by transportForCARef to set the RootCAs. + fakeCertCADataForConfigMap := []byte("LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURQRENDQWlTZ0F3SUJBZ0lJWFN1V1lMeVlRZVF3RFFZSktvWklodmNOQVFFTEJRQXdKakVTTUJBR0ExVUUKQ3hNSmIzQmxibk5vYVdaME1SQXdEZ1lEVlFRREV3ZHliMjkwTFdOaE1CNFhEVEkwTURReE1qRXdORE16T1ZvWApEVE0wTURReE1ERXdORE16T1Zvd0pqRVNNQkFHQTFVRUN4TUpiM0JsYm5Ob2FXWjBNUkF3RGdZRFZRUURFd2R5CmIyOTBMV05oTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUFyNDNTVWVKUGp4YlAKWkRUUUc1RnpDVnY3VDI0VnpWdFJpWGpoZWFWV1Byc0JTQXdVS1l3M09kSzhTd2pXdEdyS25ZVi9nOXNGNUVWWQpibzZFQ3VyaFVWSEliWjhaU1A3UDNIWnZlSHA5ak00NS9tbE5YSkttdFE4U2NMejdGNmM3YXViQUhHU210b3BOCjZndGwzMjVNV1E0TmZNUHRPSThyUlpBWEthajZGWitmZThHYVVvZGhpdTVHdzdMZGg0U1JXSkpPLzd3ZzAvUnMKTW5BYWcxc2h6UlNYdiszbXFmRXJwUzJNaVBZM0pxamdUcEkwM3VsZHpMMXdoU1ZKYjJIbDBqM0hMZzZFRDlNNwpIMzRsWENxVXl0NStWUlQ4QUYwOWp6eUlKRmZRUlBJNFVIbTBUV0dmaTRhcGNsVEtIbERFUGFibm5OS3RKWE9wCkhXYUllQitSSVFJREFRQUJvMjR3YkRBT0JnTlZIUThCQWY4RUJBTUNBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIKL3pCSkJnTlZIUTRFUWdSQTBNdVRCazdjKzZScUZQQ1FTbWtRcm94emJlR3F5dWhqbzFOUVR2YUpXWEdOanVydgpyc0ZjY3R4TjdhTGlEYWJPODVnVmR1UnlEaGw2SVBPYXE2R1RMVEFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBCm5DcWcveHFSaElIQytjV2NMbENubVplbXVCZjljV2lkQWZXL3JqOUlQaCtSRUhwVC8vRUwyOHpCdEhmcmNXSHcKNzVuT3J2eklPZllBRHo3L25oZkczK1lqK1VPc2RVZFF5aTBTV3JFUEdOUjNaRXRXTUtzL3Nodm5Na3NKQzJldgpxUm9WOVBUbUtlL2RxbVVsNk1kNGVGM0xVeGFObm9aZDcyUWQ2bFdLOUl3dldTYWpTaENrNEl2aThnbWdRTGVJClRYNU9
kcDBDdlQ3aENISVcwNnpPVHpib09waGQzWmVGTTVzeUVJMTlsM0dCUmdwQkR5T0NpL3FkNlJLVjhaRWUKQi9Ja2VtUmRwMGJDWS9QbGoxc2Z5L2NjTEF1WEtTM3BWK2N2SjVNS1ZHSmIrZWZtV0M1NE9LdmU1QXNXbzd6VApGRVdxSHNLTElweFZVdnZwM05VZjFBPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQotLS0tLUJFR0lOIENFUlRJRklDQVRFLS0tLS0KTUlJRCt6Q0NBdU9nQXdJQkFnSUlFMnYvcVpVaDJ1VXdEUVlKS29aSWh2Y05BUUVMQlFBd0pqRVNNQkFHQTFVRQpDeE1KYjNCbGJuTm9hV1owTVJBd0RnWURWUVFERXdkeWIyOTBMV05oTUI0WERUSTBNRFF4TWpFd05ETXpPVm9YCkRUSTFNRFF4TWpFd05ETTBOVm93TURFU01CQUdBMVVFQ2hNSmIzQmxibk5vYVdaME1Sb3dHQVlEVlFRREV4RnYKY0dWdWMyaHBablF0YVc1bmNtVnpjekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQgpBTVNwQUVIL2w1ZXNSV1Q4aDZpdXpKUmRkK3ZqYlluc1UzM29vbHlPZkxkTVBnS0U5VTJ6ai9TY3krcTNtaFg4ClhKQkJYcUFSbXlPSHd2bE5vM0pXUFNRYjdKamNtT3UreDFKWFdMTXBGa2pPcW12eTZEMWxqVEJrSXdETjVGZjkKV05xMmZLcjBVdm5yRmxVOWU1bUtETGxicmR1bVU4OUU2bjl1MzNxUExremtvcFVjQ0UxWjdNQ0I2L1hTNytTRAo4M0MwcldWMWZTK292M3ZtWk1vWVY1T2pGcUdFY05TVlFFT0pPbk5ZaHFKd0N5NDkvNEVQc0NHbko5VWh3cUNECjY2Y3lqemVEK20wMUxWRWQrOFNYcFYwZnFXNEFWVHBMMmYvVFBKc2lHVGJSb2pOSFVNWmswbUdFZ2ZhaHFMZTUKd3N4MURQMFFveWZCSHBJVUMvMFFHRDBDQXdFQUFhT0NBU0V3Z2dFZE1BNEdBMVVkRHdFQi93UUVBd0lGb0RBZApCZ05WSFNVRUZqQVVCZ2dyQmdFRkJRY0RBZ1lJS3dZQkJRVUhBd0V3REFZRFZSMFRBUUgvQkFJd0FEQkpCZ05WCkhRNEVRZ1JBbmw4WHN1bVBFbkkvcXMzWnE5K2hzUWpsemVuZjMyUjI4bHF1anhsVmp2YjZ3bTA3dmh3K0JCbngKeVpPZkNPTDN6VFVQR3lNSWo4V0pRZktHNkJJWnp6QkxCZ05WSFNNRVJEQkNnRURReTVNR1R0ejdwR29VOEpCSwphUkN1akhOdDRhcks2R09qVTFCTzlvbFpjWTJPNnUrdXdWeHkzRTN0b3VJTnBzN3ptQlYyNUhJT0dYb2c4NXFyCm9aTXRNRVlHQTFVZEVRUS9NRDJDT3lvdVlYQndjeTVoZW5WeVpTMXRaMjEwTG1oNWNHVnljMmhwWm5RdVlYcDEKY21VdVpHVjJZMngxYzNSbGNpNXZjR1Z1YzJocFpuUXVZMjl0TUEwR0NTcUdTSWIzRFFFQkN3VUFBNElCQVFDTAp3Q1locEtZcFZxMnBNQlRjak5Cc1ZRMkIxVDVOR3pyRmpDSjBDd1hTVGtRR00yQTFZZUROSWR6Y2FpK1hpSC9TCmZOMy9RdkdsMFhsZmxwbWU4NkhZbU1aVEV2eEY4YXd1Vi9pbWNWcnNMa0QzcnBFei8yTytQVHl3bGt1M1kwQWEKRFo0WDBleThiS2RtcFhyY0xUMmVJcjc0L0QrTTVCNlp4NzZVRU1pK3hiUlBlSDkyUFpwZmg0VEFYNTZ2UFBBMgpJQlFqNUg0ZmZpbmVIaE0zMzhMTmdpYzJIMTh2WmNDc3k0WnV5eEVxc1VGSGlUTmNuMGViRVdDTjNOTDh
1QzI3CnRoejRhMGxaTVpjQStMRG9OdnJPRXB5QnpHbVdlejVTUHg3VTV5TVlyZllwL3FVaVY5Ky9CNThrTUovRGJoOHMKOVNDYXUzcktsaU5SWmVIZFZDUUIKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=") + fakeCertCADataForConfigMapDecoded := make([]byte, len(fakeCertCADataForConfigMap)) + _, err := base64.StdEncoding.Decode(fakeCertCADataForConfigMapDecoded, fakeCertCADataForConfigMap) + g.Expect(err).ToNot(HaveOccurred()) + caName := "test" + caKey := "test" + caConfigMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: caName, + Namespace: namespace, + }, + Data: map[string]string{ + caKey: string(fakeCertCADataForConfigMapDecoded), + }, + } + + // Proxy CA ConfigMap used by transportForCARef to set the RootCAs. + fakeProxyCertCADecoded := make([]byte, len(fakeCertCAData)) + _, err = base64.StdEncoding.Decode(fakeProxyCertCADecoded, fakeCertCAData) + g.Expect(err).ToNot(HaveOccurred()) + proxyTrustedCA := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "proxyTrustedCA", + Namespace: namespace, + }, + Data: map[string]string{ + "ca-bundle.crt": string(fakeProxyCertCADecoded), + }, + } + + // Konnectivity certs needed by the konnectivity dialer. + konnectivityClientSecret := manifests.KonnectivityClientSecret(namespace) + konnectivityClientSecret.Data = map[string][]byte{ + konnectivityClientDataCertKey: fakeClientCertData, + konnectivityClientDataKey: fakeClientKeyData, + } + konnectivityCAConfigMap := manifests.KonnectivityCAConfigMap(namespace) + konnectivityCA, err := base64.StdEncoding.DecodeString(string(fakeCertCAData)) + g.Expect(err).ToNot(HaveOccurred()) + konnectivityCAConfigMap.Data = map[string]string{ + konnectivityCADataKey: string(konnectivityCA), + } + + // Kubeconfig used by the konnectivity dialer to connect to the guest cluster and resolve SVCs DNS. 
+ kubeconfigSecret := manifests.KASServiceKubeconfigSecret(namespace) + kubeconfigSecret.Data = map[string][]byte{ + kubeconfigDataKey: []byte(fmt.Sprintf(` +apiVersion: v1 +kind: Config +clusters: +- cluster: + certificate-authority-data: %s + server: https://fake.kubernetes.server:6443 + name: fake-cluster +contexts: +- context: + cluster: fake-cluster + user: fake-user + name: fake-context +current-context: fake-context +preferences: {} +users: +- name: fake-user + user: + client-certificate-data: %s + client-key-data: %s +`, fakeCertCAData, fakeClientCertData, fakeClientKeyData)), + } + + // Fake client with all the expected resources. + scheme := scheme.Scheme + err = hyperv1.AddToScheme(scheme) + g.Expect(err).ToNot(HaveOccurred()) + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects( + tc.hcp, + caConfigMap, + konnectivityClientSecret, + konnectivityCAConfigMap, + proxyTrustedCA, + kubeconfigSecret, + ).Build() + + // Run function. + transport, err := transportForCARef(context.Background(), client, namespace, caName, caKey, false) + g.Expect(err).ToNot(HaveOccurred()) + tr := transport.(*http.Transport) + + // Validate proxy expectations. + url, err := tr.Proxy(&http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: tc.requestToURL, + }, + }) + g.Expect(err).ToNot(HaveOccurred()) + gotURL := "" + if url != nil { + gotURL = url.String() + } + g.Expect(gotURL).To(Equal(tc.expectedProxyRequestURL)) + + // Validate RootCAs expectations. + expectedCertPool := x509.NewCertPool() + if tc.hcp.Spec.Configuration != nil { + if tc.hcp.Spec.Configuration.Proxy.TrustedCA.Name != "" { + expectedCertPool.AppendCertsFromPEM([]byte(fakeProxyCertCADecoded)) + } + } + expectedCertPool.AppendCertsFromPEM([]byte(fakeCertCADataForConfigMapDecoded)) + g.Expect(tr.TLSClientConfig.RootCAs.Equal(expectedCertPool)).To(BeTrue()) + + // TODO(alberto): add some validation for DialContext. 
+ }) + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/params.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/params.go index 82d8a0e916..8816732324 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oauth/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/params.go @@ -31,7 +31,8 @@ type OAuthServerParams struct { ExternalAPIPort int32 `json:"externalAPIPort"` OAuthServerImage string config.DeploymentConfig `json:",inline"` - OAuth *configv1.OAuthSpec `json:"oauth"` + OAuth *configv1.OAuthSpec + ProxyConfig *configv1.ProxySpec APIServer *configv1.APIServerSpec `json:"apiServer"` // OauthConfigOverrides contains a mapping from provider name to the config overrides specified for the provider. // The only supported use case of using this is for the IBMCloud IAM OIDC provider. @@ -42,8 +43,13 @@ type OAuthServerParams struct { LoginURLOverride string AvailabilityProberImage string `json:"availabilityProberImage"` Availability hyperv1.AvailabilityPolicy - Socks5ProxyImage string - NoProxy []string + // ProxyImage is the image that contains the control-plane-operator binary that will + // be used to run konnectivity-socks5-proxy and konnectivity-https-proxy + ProxyImage string + // OAuthNoProxy is a list of hosts or IPs that should not be routed through + // konnectivity. Currently only used for IBM Cloud specific addresses. 
+ OAuthNoProxy []string + AuditWebhookRef *corev1.LocalObjectReference } type OAuthConfigParams struct { @@ -88,13 +94,19 @@ func NewOAuthServerParams(hcp *hyperv1.HostedControlPlane, releaseImageProvider OAuthServerImage: releaseImageProvider.GetImage("oauth-server"), AvailabilityProberImage: releaseImageProvider.GetImage(util.AvailabilityProberImageName), Availability: hcp.Spec.ControllerAvailabilityPolicy, - Socks5ProxyImage: releaseImageProvider.GetImage("socks5-proxy"), - NoProxy: []string{manifests.KubeAPIServerService("").Name}, + ProxyImage: releaseImageProvider.GetImage("socks5-proxy"), + OAuthNoProxy: []string{manifests.KubeAPIServerService("").Name}, } if hcp.Spec.Configuration != nil { p.APIServer = hcp.Spec.Configuration.APIServer p.OAuth = hcp.Spec.Configuration.OAuth + p.ProxyConfig = hcp.Spec.Configuration.Proxy } + + if hcp.Spec.AuditWebhook != nil && len(hcp.Spec.AuditWebhook.Name) > 0 { + p.AuditWebhookRef = hcp.Spec.AuditWebhook + } + p.Scheduling = config.Scheduling{ PriorityClass: config.APICriticalPriorityClass, } @@ -108,6 +120,18 @@ func NewOAuthServerParams(hcp *hyperv1.HostedControlPlane, releaseImageProvider corev1.ResourceCPU: resource.MustParse("25m"), }, }, + oauthContainerHTTPProxy().Name: { + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("10Mi"), + }, + }, + oauthContainerSocks5Proxy().Name: { + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("10m"), + corev1.ResourceMemory: resource.MustParse("10Mi"), + }, + }, } p.LivenessProbes = config.LivenessProbes{ oauthContainerMain().Name: { @@ -164,7 +188,7 @@ func NewOAuthServerParams(hcp *hyperv1.HostedControlPlane, releaseImageProvider p.SetDefaultSecurityContext = setDefaultSecurityContext if hcp.Spec.Platform.Type == hyperv1.IBMCloudPlatform { - p.NoProxy = append(p.NoProxy, "iam.cloud.ibm.com", "iam.test.cloud.ibm.com") + p.OAuthNoProxy = append(p.OAuthNoProxy, "iam.cloud.ibm.com", 
"iam.test.cloud.ibm.com") } return p @@ -243,6 +267,16 @@ func (p *OAuthServerParams) ConfigParams(servingCert *corev1.Secret) *OAuthConfi } } +func (p *OAuthServerParams) AuditPolicyConfig() configv1.Audit { + if p.APIServer != nil && p.APIServer.Audit.Profile != "" { + return p.APIServer.Audit + } else { + return configv1.Audit{ + Profile: configv1.DefaultAuditProfileType, + } + } +} + type OAuthServiceParams struct { OAuth *configv1.OAuth `json:"oauth"` OwnerRef config.OwnerRef `json:"ownerRef"` diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/pdb.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/pdb.go index 1ad588f72f..ab15ade07f 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oauth/pdb.go +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/pdb.go @@ -1,10 +1,9 @@ package oauth import ( - hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/support/util" policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" ) func ReconcilePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget, p *OAuthServerParams) error { @@ -13,17 +12,7 @@ func ReconcilePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget, p *OAuthSer MatchLabels: oauthLabels(), } } - p.OwnerRef.ApplyTo(pdb) - - var minAvailable int - switch p.Availability { - case hyperv1.SingleReplica: - minAvailable = 0 - case hyperv1.HighlyAvailable: - minAvailable = 1 - } - pdb.Spec.MinAvailable = &intstr.IntOrString{Type: intstr.Int, IntVal: int32(minAvailable)} - + util.ReconcilePodDisruptionBudget(pdb, p.Availability) return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/route.go b/control-plane-operator/controllers/hostedcontrolplane/oauth/route.go index a7e627db98..49887f66d7 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/oauth/route.go +++ 
b/control-plane-operator/controllers/hostedcontrolplane/oauth/route.go @@ -10,14 +10,14 @@ import ( "github.com/openshift/hypershift/support/util" ) -func ReconcileExternalPublicRoute(route *routev1.Route, ownerRef config.OwnerRef, hostname string, defaultIngressDomain string) error { +func ReconcileExternalPublicRoute(route *routev1.Route, ownerRef config.OwnerRef, hostname string, defaultIngressDomain string, labelHCPRoutes bool) error { ownerRef.ApplyTo(route) - return util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, manifests.OauthServerService(route.Namespace).Name) + return util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, manifests.OauthServerService(route.Namespace).Name, labelHCPRoutes) } -func ReconcileExternalPrivateRoute(route *routev1.Route, ownerRef config.OwnerRef, hostname string, defaultIngressDomain string) error { +func ReconcileExternalPrivateRoute(route *routev1.Route, ownerRef config.OwnerRef, hostname string, defaultIngressDomain string, labelHCPRoutes bool) error { ownerRef.ApplyTo(route) - if err := util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, manifests.OauthServerService(route.Namespace).Name); err != nil { + if err := util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, manifests.OauthServerService(route.Namespace).Name, labelHCPRoutes); err != nil { return err } if route.Labels == nil { diff --git a/control-plane-operator/controllers/hostedcontrolplane/oauth/testdata/zz_fixture_TestReconcileOauthDeploymentNoChanges.yaml b/control-plane-operator/controllers/hostedcontrolplane/oauth/testdata/zz_fixture_TestReconcileOauthDeploymentNoChanges.yaml new file mode 100644 index 0000000000..64c2080dea --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/oauth/testdata/zz_fixture_TestReconcileOauthDeploymentNoChanges.yaml @@ -0,0 +1,200 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + 
hypershift.openshift.io/managed-by: control-plane-operator + name: oauth-openshift + namespace: test + ownerReferences: + - apiVersion: hypershift.openshift.io/v1beta1 + blockOwnerDeletion: true + controller: true + kind: HostedControlPlane + name: name + uid: "" +spec: + minReadySeconds: 60 + replicas: 0 + revisionHistoryLimit: 0 + selector: + matchLabels: + app: oauth-openshift + hypershift.openshift.io/control-plane-component: oauth-openshift + strategy: + rollingUpdate: + maxSurge: 3 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict-local-volumes: logs + oauth.hypershift.openshift.io/config-hash: 24346e1b50066607059af36e3b684b24 + creationTimestamp: null + labels: + app: oauth-openshift + hypershift.openshift.io/control-plane-component: oauth-openshift + spec: + automountServiceAccountToken: false + containers: + - args: + - osinserver + - --config=/etc/kubernetes/config/config.yaml + - --audit-log-format=json + - --audit-log-maxbackup=1 + - --audit-log-maxsize=10 + - --audit-log-path=/var/run/kubernetes/audit.log + - --audit-policy-file=/etc/kubernetes/audit-config/policy.yaml + - --audit-webhook-config-file=/etc/kubernetes/auditwebhook/webhook-kubeconfig + - --audit-webhook-mode=batch + env: + - name: HTTP_PROXY + value: http://127.0.0.1:8092 + - name: HTTPS_PROXY + value: http://127.0.0.1:8092 + - name: ALL_PROXY + value: socks5://127.0.0.1:8090 + - name: NO_PROXY + image: oauthImage + name: oauth-server + resources: {} + volumeMounts: + - mountPath: /etc/kubernetes/audit-config + name: audit-config + - mountPath: /etc/kubernetes/secrets/templates/error + name: error-template + - mountPath: /etc/kubernetes/secrets/svc-kubeconfig + name: kubeconfig + - mountPath: /etc/kubernetes/secrets/templates/login + name: login-template + - mountPath: /var/run/kubernetes + name: logs + - mountPath: /etc/kubernetes/certs/master-ca + name: master-ca-bundle + - mountPath: /etc/kubernetes/config 
+ name: oauth-config + - mountPath: /etc/kubernetes/secrets/templates/providers + name: providers-template + - mountPath: /etc/kubernetes/certs/serving-cert + name: serving-cert + - mountPath: /etc/kubernetes/secrets/session + name: session-secret + - mountPath: /etc/kubernetes/auditwebhook + name: oauth-audit-webhook + workingDir: /var/run/kubernetes + - args: + - run + - --resolve-from-guest-cluster-dns=true + - --resolve-from-management-cluster-dns=true + command: + - /usr/bin/control-plane-operator + - konnectivity-socks5-proxy + env: + - name: KUBECONFIG + value: /etc/kubernetes/kubeconfig + image: test-socks-5-proxy-image + name: socks5-proxy + resources: {} + volumeMounts: + - mountPath: /etc/konnectivity/proxy-ca + name: konnectivity-proxy-ca + - mountPath: /etc/konnectivity/proxy-client + name: konnectivity-proxy-cert + - mountPath: /etc/kubernetes + name: kubeconfig + - args: + - run + - --serving-port=8092 + - --connect-directly-to-cloud-apis + command: + - /usr/bin/control-plane-operator + - konnectivity-https-proxy + env: + - name: KUBECONFIG + value: /etc/kubernetes/kubeconfig + image: test-socks-5-proxy-image + name: http-proxy + resources: {} + volumeMounts: + - mountPath: /etc/konnectivity/proxy-ca + name: konnectivity-proxy-ca + - mountPath: /etc/konnectivity/proxy-client + name: konnectivity-proxy-cert + - mountPath: /etc/kubernetes + name: kubeconfig + - args: + - -c + - "\nset -o errexit\nset -o nounset\nset -o pipefail\n\nfunction cleanup() + {\n\tkill -- -$$\n\twait\n}\ntrap cleanup SIGTERM\n\n/usr/bin/tail -c+1 + -F /var/run/kubernetes/audit.log &\nwait $!\n" + command: + - /bin/bash + image: oauthImage + imagePullPolicy: IfNotPresent + name: audit-logs + resources: + requests: + cpu: 5m + memory: 10Mi + volumeMounts: + - mountPath: /var/run/kubernetes + name: logs + initContainers: + - command: + - /usr/bin/control-plane-operator + - availability-prober + - --target + - https://kube-apiserver:2040/readyz + image: test-availability-image + 
imagePullPolicy: IfNotPresent + name: availability-prober + resources: {} + volumes: + - configMap: + name: oauth-openshift + name: oauth-config + - name: kubeconfig + secret: + defaultMode: 416 + secretName: service-network-admin-kubeconfig + - name: serving-cert + secret: + defaultMode: 416 + secretName: oauth-server-crt + - name: session-secret + secret: + defaultMode: 416 + secretName: oauth-openshift-session + - name: error-template + secret: + defaultMode: 416 + secretName: oauth-openshift-default-error-template + - name: login-template + secret: + defaultMode: 416 + secretName: oauth-openshift-default-login-template + - name: providers-template + secret: + defaultMode: 416 + secretName: oauth-openshift-default-provider-selection-template + - emptyDir: {} + name: logs + - configMap: + name: oauth-master-ca-bundle + name: master-ca-bundle + - configMap: + name: oauth-openshift-audit + name: audit-config + - name: konnectivity-proxy-cert + secret: + defaultMode: 416 + secretName: konnectivity-client + - configMap: + defaultMode: 416 + name: konnectivity-ca-bundle + name: konnectivity-proxy-ca + - name: oauth-audit-webhook + secret: + secretName: test-webhook-audit-secret +status: {} diff --git a/control-plane-operator/controllers/hostedcontrolplane/ocm/config.go b/control-plane-operator/controllers/hostedcontrolplane/ocm/config.go index 9d2711a280..093934661e 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ocm/config.go +++ b/control-plane-operator/controllers/hostedcontrolplane/ocm/config.go @@ -19,17 +19,17 @@ import ( ) const ( - configKey = "config.yaml" + ConfigKey = "config.yaml" ) -func ReconcileOpenShiftControllerManagerConfig(cm *corev1.ConfigMap, ownerRef config.OwnerRef, deployerImage, dockerBuilderImage, minTLSVersion string, cipherSuites []string, imageConfig *configv1.Image, buildConfig *configv1.Build, networkConfig *configv1.NetworkSpec) error { +func ReconcileOpenShiftControllerManagerConfig(cm *corev1.ConfigMap, ownerRef 
config.OwnerRef, deployerImage, dockerBuilderImage, minTLSVersion string, cipherSuites []string, imageConfig *configv1.ImageSpec, buildConfig *configv1.Build, networkConfig *configv1.NetworkSpec) error { ownerRef.ApplyTo(cm) if cm.Data == nil { cm.Data = map[string]string{} } config := &openshiftcpv1.OpenShiftControllerManagerConfig{} - if configStr, exists := cm.Data[configKey]; exists && len(configStr) > 0 { + if configStr, exists := cm.Data[ConfigKey]; exists && len(configStr) > 0 { err := util.DeserializeResource(configStr, config, api.Scheme) if err != nil { return fmt.Errorf("unable to decode existing openshift controller manager configuration: %w", err) @@ -42,11 +42,11 @@ func ReconcileOpenShiftControllerManagerConfig(cm *corev1.ConfigMap, ownerRef co if err != nil { return fmt.Errorf("failed to serialize openshift controller manager configuration: %w", err) } - cm.Data[configKey] = configStr + cm.Data[ConfigKey] = configStr return nil } -func reconcileConfig(cfg *openshiftcpv1.OpenShiftControllerManagerConfig, deployerImage, dockerBuilderImage, minTLSVersion string, cipherSuites []string, imageConfig *configv1.Image, buildConfig *configv1.Build, networkConfig *configv1.NetworkSpec) error { +func reconcileConfig(cfg *openshiftcpv1.OpenShiftControllerManagerConfig, deployerImage, dockerBuilderImage, minTLSVersion string, cipherSuites []string, imageConfig *configv1.ImageSpec, buildConfig *configv1.Build, networkConfig *configv1.NetworkSpec) error { cpath := func(volume, file string) string { dir := volumeMounts.Path(ocmContainerMain().Name, volume) return path.Join(dir, file) @@ -56,14 +56,18 @@ func reconcileConfig(cfg *openshiftcpv1.OpenShiftControllerManagerConfig, deploy APIVersion: openshiftcpv1.GroupVersion.String(), } + // Do not modify cfg.Controllers! + // This field is currently owned by the HCCO. + // When we add Capabilities support, we will set Controllers here + // but we have to remove setting it in the HCCO at the same time. 
+ cfg.Build.ImageTemplateFormat.Format = dockerBuilderImage cfg.Deployer.ImageTemplateFormat.Format = deployerImage // registry config - cfg.DockerPullSecret.InternalRegistryHostname = imageConfig.Status.InternalRegistryHostname - cfg.DockerPullSecret.RegistryURLs = imageConfig.Status.ExternalRegistryHostnames - if len(cfg.DockerPullSecret.InternalRegistryHostname) == 0 { - cfg.DockerPullSecret.InternalRegistryHostname = config.DefaultImageRegistryHostname + cfg.DockerPullSecret.InternalRegistryHostname = config.DefaultImageRegistryHostname + if imageConfig != nil { + cfg.DockerPullSecret.RegistryURLs = imageConfig.ExternalRegistryHostnames } // build config diff --git a/control-plane-operator/controllers/hostedcontrolplane/ocm/config_test.go b/control-plane-operator/controllers/hostedcontrolplane/ocm/config_test.go index b73722de1c..4ff29d87e3 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ocm/config_test.go +++ b/control-plane-operator/controllers/hostedcontrolplane/ocm/config_test.go @@ -37,11 +37,7 @@ func TestReconcileOpenShiftControllerManagerConfig(t *testing.T) { } imageProvider := imageprovider.NewFromImages(images) - imageConfig := &v1.Image{ - Status: v1.ImageStatus{ - InternalRegistryHostname: "image-registry.openshift-image-registry.svc:5000", - }, - } + imageConfig := &v1.ImageSpec{} buildConfig := &v1.Build{ Spec: v1.BuildSpec{ @@ -64,7 +60,6 @@ func TestReconcileOpenShiftControllerManagerConfig(t *testing.T) { observedConfig := &globalconfig.ObservedConfig{ Build: buildConfig, - Image: imageConfig, } params := NewOpenShiftControllerManagerParams(hcp, observedConfig, imageProvider, true) diff --git a/control-plane-operator/controllers/hostedcontrolplane/ocm/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/ocm/deployment.go index ff4d861cc4..7d7df79312 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ocm/deployment.go +++ 
b/control-plane-operator/controllers/hostedcontrolplane/ocm/deployment.go @@ -42,7 +42,7 @@ func openShiftControllerManagerLabels() map[string]string { } func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, image string, config *corev1.ConfigMap, deploymentConfig config.DeploymentConfig) error { - configBytes, ok := config.Data[configKey] + configBytes, ok := config.Data[ConfigKey] if !ok { return fmt.Errorf("openshift apiserver configuration is not expected to be empty") } @@ -97,7 +97,7 @@ func buildOCMContainerMain(image string) func(*corev1.Container) { c.Args = []string{ "start", "--config", - path.Join(volumeMounts.Path(c.Name, ocmVolumeConfig().Name), configKey), + path.Join(volumeMounts.Path(c.Name, ocmVolumeConfig().Name), ConfigKey), } c.VolumeMounts = volumeMounts.ContainerMounts(c.Name) c.Ports = []corev1.ContainerPort{ diff --git a/control-plane-operator/controllers/hostedcontrolplane/ocm/params.go b/control-plane-operator/controllers/hostedcontrolplane/ocm/params.go index bfecdfb703..021285dd14 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/ocm/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/ocm/params.go @@ -19,7 +19,7 @@ type OpenShiftControllerManagerParams struct { APIServer *configv1.APIServerSpec Network *configv1.NetworkSpec Build *configv1.Build - Image *configv1.Image + Image *configv1.ImageSpec DeploymentConfig config.DeploymentConfig config.OwnerRef @@ -31,11 +31,11 @@ func NewOpenShiftControllerManagerParams(hcp *hyperv1.HostedControlPlane, observ DockerBuilderImage: releaseImageProvider.GetImage("docker-builder"), DeployerImage: releaseImageProvider.GetImage("deployer"), Build: observedConfig.Build, - Image: observedConfig.Image, } if hcp.Spec.Configuration != nil { params.APIServer = hcp.Spec.Configuration.APIServer params.Network = hcp.Spec.Configuration.Network + params.Image = hcp.Spec.Configuration.Image } params.DeploymentConfig = config.DeploymentConfig{ diff 
--git a/control-plane-operator/controllers/hostedcontrolplane/olm/assets/olm-operator-deployment.yaml b/control-plane-operator/controllers/hostedcontrolplane/olm/assets/olm-operator-deployment.yaml index a3ded12f67..5daaa69f25 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/olm/assets/olm-operator-deployment.yaml +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/assets/olm-operator-deployment.yaml @@ -55,7 +55,7 @@ spec: - $(OPERATOR_NAMESPACE) - --writeStatusName - operator-lifecycle-manager - - --writePackageServerStatusName="" + - --writePackageServerStatusName= - --tls-cert - /srv-cert/tls.crt - --tls-key diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs.go b/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs.go index 68aa0b7d13..22f18d07c5 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs.go +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs.go @@ -110,19 +110,19 @@ func reconcileCatalogDeployment(deployment *appsv1.Deployment, ownerRef config.O } func findTagReference(tags []imagev1.TagReference, name string) *imagev1.TagReference { - for _, tag := range tags { + for i, tag := range tags { if tag.Name == name { - return &tag + return &tags[i] } } return nil } -var CatalogToImage map[string]string = map[string]string{ - "certified-operators": "registry.redhat.io/redhat/certified-operator-index:v4.14", - "community-operators": "registry.redhat.io/redhat/community-operator-index:v4.14", - "redhat-marketplace": "registry.redhat.io/redhat/redhat-marketplace-index:v4.14", - "redhat-operators": "registry.redhat.io/redhat/redhat-operator-index:v4.14", +var CatalogToImage = map[string]string{ + "certified-operators": "registry.redhat.io/redhat/certified-operator-index:v4.15", + "community-operators": "registry.redhat.io/redhat/community-operator-index:v4.15", + "redhat-marketplace": 
"registry.redhat.io/redhat/redhat-marketplace-index:v4.15", + "redhat-operators": "registry.redhat.io/redhat/redhat-operator-index:v4.15", } func ReconcileCatalogsImageStream(imageStream *imagev1.ImageStream, ownerRef config.OwnerRef, isImageRegistryOverrides map[string][]string) error { diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs_test.go b/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs_test.go new file mode 100644 index 0000000000..fcd1841a15 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/catalogs_test.go @@ -0,0 +1,54 @@ +package olm + +import ( + "testing" + + . "github.com/onsi/gomega" +) + +func TestGetCatalogToImageWithISImageRegistryOverrides(t *testing.T) { + tests := []struct { + name string + catalogToImage map[string]string + isImageRegistryOverrides map[string][]string + expected map[string]string + }{ + { + name: "No overrides", + catalogToImage: map[string]string{ + "certified-operators": "registry.redhat.io/redhat/certified-operator-index:v4.16", + "community-operators": "registry.redhat.io/redhat/community-operator-index:v4.16", + }, + isImageRegistryOverrides: map[string][]string{}, + expected: map[string]string{ + "certified-operators": "registry.redhat.io/redhat/certified-operator-index:v4.16", + "community-operators": "registry.redhat.io/redhat/community-operator-index:v4.16", + }, + }, + { + name: "Single override and different tag", + catalogToImage: map[string]string{ + "certified-operators": "registry.redhat.io/redhat/certified-operator-index:v4.17", + "community-operators": "registry.redhat.io/redhat/community-operator-index:v4.17", + }, + isImageRegistryOverrides: map[string][]string{ + "registry.redhat.io": {"custom.registry.io"}, + }, + expected: map[string]string{ + "certified-operators": "custom.registry.io/redhat/certified-operator-index:v4.17", + "community-operators": "custom.registry.io/redhat/community-operator-index:v4.17", + }, + }, + } + + 
for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + result := getCatalogToImageWithISImageRegistryOverrides(tt.catalogToImage, tt.isImageRegistryOverrides) + g.Expect(result).To(Equal(tt.expected), "Expected %d entries, but got %d", len(tt.expected), len(result)) + for key, expectedValue := range tt.expected { + g.Expect(expectedValue).To(Equal(result[key]), "For key %s, expected %s, but got %s", key, expectedValue, result[key]) + } + }) + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/deployments.go b/control-plane-operator/controllers/hostedcontrolplane/olm/deployments.go new file mode 100644 index 0000000000..bc9896fccb --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/deployments.go @@ -0,0 +1,43 @@ +package olm + +import ( + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/support/config" + appsv1 "k8s.io/api/apps/v1" +) + +type OLMDeployment struct { + Name string + Manifest *appsv1.Deployment + Reconciler func(*appsv1.Deployment, config.OwnerRef, config.DeploymentConfig, string) error + Image string +} + +func OLMDeployments(p *OperatorLifecycleManagerParams, hcpNamespace string) []OLMDeployment { + return []OLMDeployment{ + { + Name: "certifiedOperatorsDeployment", + Manifest: manifests.CertifiedOperatorsDeployment(hcpNamespace), + Reconciler: ReconcileCertifiedOperatorsDeployment, + Image: p.CertifiedOperatorsCatalogImageOverride, + }, + { + Name: "communityOperatorsDeployment", + Manifest: manifests.CommunityOperatorsDeployment(hcpNamespace), + Reconciler: ReconcileCommunityOperatorsDeployment, + Image: p.CommunityOperatorsCatalogImageOverride, + }, + { + Name: "marketplaceOperatorsDeployment", + Manifest: manifests.RedHatMarketplaceOperatorsDeployment(hcpNamespace), + Reconciler: ReconcileRedHatMarketplaceOperatorsDeployment, + Image: p.RedHatMarketplaceCatalogImageOverride, + }, + 
{ + Name: "redHatOperatorsDeployment", + Manifest: manifests.RedHatOperatorsDeployment(hcpNamespace), + Reconciler: ReconcileRedHatOperatorsDeployment, + Image: p.RedHatOperatorsCatalogImageOverride, + }, + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/operator.go b/control-plane-operator/controllers/hostedcontrolplane/olm/operator.go index 90edc62bc6..806a6a2ded 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/olm/operator.go +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/operator.go @@ -4,6 +4,7 @@ import ( "strings" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" "github.com/openshift/hypershift/support/assets" prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" @@ -13,13 +14,17 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" - "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" "github.com/openshift/hypershift/support/certs" "github.com/openshift/hypershift/support/config" "github.com/openshift/hypershift/support/metrics" "github.com/openshift/hypershift/support/util" ) +const ( + catalogOperatorName = "catalog-operator" + olmOperatorName = "olm-operator" +) + var ( catalogOperatorMetricsService = assets.MustService(content.ReadFile, "assets/catalog-metrics-service.yaml") catalogOperatorDeployment = assets.MustDeployment(content.ReadFile, "assets/catalog-operator-deployment.yaml") @@ -29,7 +34,7 @@ var ( ) func olmOperatorLabels() map[string]string { - return map[string]string{"app": "olm-operator", hyperv1.ControlPlaneComponent: "olm-operator"} + return map[string]string{"app": olmOperatorName, hyperv1.ControlPlaneComponent: olmOperatorName} } func 
ReconcileCatalogOperatorMetricsService(svc *corev1.Service, ownerRef config.OwnerRef) error { @@ -46,13 +51,28 @@ func ReconcileCatalogOperatorMetricsService(svc *corev1.Service, ownerRef config return nil } -func ReconcileCatalogOperatorDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, olmImage, socks5ProxyImage, operatorRegistryImage, releaseVersion string, dc config.DeploymentConfig, availabilityProberImage string, noProxy []string) error { +func ReconcileCatalogOperatorDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, olmImage, socks5ProxyImage, operatorRegistryImage, releaseVersion string, dc config.DeploymentConfig, availabilityProberImage string, noProxy []string, platformType hyperv1.PlatformType) error { ownerRef.ApplyTo(deployment) + + // preserve existing resource requirements + catalogOperatorResources := corev1.ResourceRequirements{} + mainContainer := util.FindContainer(catalogOperatorName, catalogOperatorDeployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + catalogOperatorResources = mainContainer.Resources + } + mainContainer = util.FindContainer(catalogOperatorName, deployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + catalogOperatorResources = mainContainer.Resources + } + } + deployment.Spec = catalogOperatorDeployment.DeepCopy().Spec for i, container := range deployment.Spec.Template.Spec.Containers { switch container.Name { - case "catalog-operator": + case catalogOperatorName: deployment.Spec.Template.Spec.Containers[i].Image = olmImage + deployment.Spec.Template.Spec.Containers[i].Resources = catalogOperatorResources case "socks5-proxy": deployment.Spec.Template.Spec.Containers[i].Image = socks5ProxyImage deployment.Spec.Template.Spec.Containers[i].ImagePullPolicy = corev1.PullIfNotPresent @@ -75,7 +95,7 @@ func ReconcileCatalogOperatorDeployment(deployment *appsv1.Deployment, 
ownerRef } } dc.ApplyTo(deployment) - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { o.KubeconfigVolumeName = "kubeconfig" o.RequiredAPIs = []schema.GroupVersionKind{ {Group: "operators.coreos.com", Version: "v1alpha1", Kind: "CatalogSource"}, @@ -99,13 +119,28 @@ func ReconcileOLMOperatorMetricsService(svc *corev1.Service, ownerRef config.Own return nil } -func ReconcileOLMOperatorDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, olmImage, socks5ProxyImage, releaseVersion string, dc config.DeploymentConfig, availabilityProberImage string, noProxy []string) error { +func ReconcileOLMOperatorDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, olmImage, socks5ProxyImage, releaseVersion string, dc config.DeploymentConfig, availabilityProberImage string, noProxy []string, platformType hyperv1.PlatformType) error { ownerRef.ApplyTo(deployment) + + // preserve existing resource requirements + olmOperatorResources := corev1.ResourceRequirements{} + mainContainer := util.FindContainer(olmOperatorName, olmOperatorDeployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + olmOperatorResources = mainContainer.Resources + } + mainContainer = util.FindContainer(olmOperatorName, deployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + olmOperatorResources = mainContainer.Resources + } + } + deployment.Spec = olmOperatorDeployment.DeepCopy().Spec for i, container := range deployment.Spec.Template.Spec.Containers { switch container.Name { - case "olm-operator": + case olmOperatorName: deployment.Spec.Template.Spec.Containers[i].Image = olmImage + 
deployment.Spec.Template.Spec.Containers[i].Resources = olmOperatorResources case "socks5-proxy": deployment.Spec.Template.Spec.Containers[i].Image = socks5ProxyImage deployment.Spec.Template.Spec.Containers[i].ImagePullPolicy = corev1.PullIfNotPresent @@ -124,7 +159,7 @@ func ReconcileOLMOperatorDeployment(deployment *appsv1.Deployment, ownerRef conf } } dc.ApplyTo(deployment) - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { o.KubeconfigVolumeName = "kubeconfig" o.RequiredAPIs = []schema.GroupVersionKind{ {Group: "operators.coreos.com", Version: "v1alpha1", Kind: "CatalogSource"}, diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/operator_test.go b/control-plane-operator/controllers/hostedcontrolplane/olm/operator_test.go new file mode 100644 index 0000000000..31ae2ab733 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/operator_test.go @@ -0,0 +1,104 @@ +package olm + +import ( + "testing" + + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + hyperapi "github.com/openshift/hypershift/support/api" + "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/testutil" + "github.com/openshift/hypershift/support/util" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestReconcileCatalogOperatorDeployment(t *testing.T) { + tcs := []struct { + name string + coResources *corev1.ResourceRequirements + }{ + { + name: "Preserve existing resources", + coResources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + Limits: 
corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000m"), + corev1.ResourceMemory: resource.MustParse("1000Mi"), + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + dep := &appsv1.Deployment{} + if tc.coResources != nil { + dep.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: catalogOperatorName, + Resources: *tc.coResources, + }, + } + } + + if err := ReconcileCatalogOperatorDeployment(dep, config.OwnerRef{}, "", "", "", "", config.DeploymentConfig{}, "", []string{}, hyperv1.NonePlatform); err != nil { + t.Fatalf("ReconcileCatalogOperatorDeployment: %v", err) + } + + deploymentYaml, err := util.SerializeResource(dep, hyperapi.Scheme) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + testutil.CompareWithFixture(t, deploymentYaml) + }) + } +} + +func TestReconcileOLMOperatorDeployment(t *testing.T) { + tcs := []struct { + name string + olmOpResources *corev1.ResourceRequirements + }{ + { + name: "Preserve existing resources", + olmOpResources: &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000m"), + corev1.ResourceMemory: resource.MustParse("1000Mi"), + }, + }, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + dep := &appsv1.Deployment{} + if tc.olmOpResources != nil { + dep.Spec.Template.Spec.Containers = []corev1.Container{ + { + Name: olmOperatorName, + Resources: *tc.olmOpResources, + }, + } + } + + if err := ReconcileOLMOperatorDeployment(dep, config.OwnerRef{}, "", "", "", config.DeploymentConfig{}, "", []string{}, hyperv1.NonePlatform); err != nil { + t.Fatalf("ReconcileOLMOperatorDeployment: %v", err) + } + + deploymentYaml, err := util.SerializeResource(dep, hyperapi.Scheme) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + 
testutil.CompareWithFixture(t, deploymentYaml) + }) + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/packageserver.go b/control-plane-operator/controllers/hostedcontrolplane/olm/packageserver.go index 0aeb0b5eda..b2e2aa272d 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/olm/packageserver.go +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/packageserver.go @@ -3,28 +3,48 @@ package olm import ( "strings" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" "github.com/openshift/hypershift/support/assets" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" "github.com/openshift/hypershift/support/config" "github.com/openshift/hypershift/support/util" ) +const ( + packageServerName = "packageserver" +) + var ( packageServerDeployment = assets.MustDeployment(content.ReadFile, "assets/packageserver-deployment.yaml") ) -func ReconcilePackageServerDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, olmImage, socks5ProxyImage, releaseVersion string, dc config.DeploymentConfig, availabilityProberImage string, noProxy []string) error { +func ReconcilePackageServerDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, olmImage, socks5ProxyImage, releaseVersion string, dc config.DeploymentConfig, availabilityProberImage string, noProxy []string, platformType hyperv1.PlatformType) error { ownerRef.ApplyTo(deployment) + + // preserve existing resource requirements + packageserverResources := corev1.ResourceRequirements{} + mainContainer := util.FindContainer(packageServerName, packageServerDeployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + packageserverResources = 
mainContainer.Resources + } + mainContainer = util.FindContainer(packageServerName, deployment.Spec.Template.Spec.Containers) + if mainContainer != nil { + if len(mainContainer.Resources.Requests) > 0 || len(mainContainer.Resources.Limits) > 0 { + packageserverResources = mainContainer.Resources + } + } + deployment.Spec = packageServerDeployment.DeepCopy().Spec for i, container := range deployment.Spec.Template.Spec.Containers { switch container.Name { - case "packageserver": + case packageServerName: deployment.Spec.Template.Spec.Containers[i].Image = olmImage + deployment.Spec.Template.Spec.Containers[i].Resources = packageserverResources case "socks5-proxy": deployment.Spec.Template.Spec.Containers[i].Image = socks5ProxyImage deployment.Spec.Template.Spec.Containers[i].ImagePullPolicy = corev1.PullIfNotPresent @@ -43,7 +63,7 @@ func ReconcilePackageServerDeployment(deployment *appsv1.Deployment, ownerRef co } } dc.ApplyTo(deployment) - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), availabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { o.KubeconfigVolumeName = "kubeconfig" o.RequiredAPIs = []schema.GroupVersionKind{ {Group: "operators.coreos.com", Version: "v1alpha1", Kind: "CatalogSource"}, diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/packageserver_test.go b/control-plane-operator/controllers/hostedcontrolplane/olm/packageserver_test.go new file mode 100644 index 0000000000..2208ebf0f0 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/packageserver_test.go @@ -0,0 +1,50 @@ +package olm + +import ( + "testing" + + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/support/config" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + 
"k8s.io/apimachinery/pkg/api/resource" +) + +func TestReconcilePackageServerDeployment(t *testing.T) { + t.Run("Packageserver resource preservation", func(t *testing.T) { + dep := &appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: packageServerName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000m"), + corev1.ResourceMemory: resource.MustParse("1000Mi"), + }, + }, + }, + }, + }, + }, + }, + } + if err := ReconcilePackageServerDeployment(dep, config.OwnerRef{}, "", "", "", config.DeploymentConfig{}, "", []string{}, hyperv1.NonePlatform); err != nil { + t.Fatalf("ReconcilePackageServerDeployment: %v", err) + } + + // Verify the existing resources were preserved + if dep.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue() != 100 || + dep.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().Value()/(1024*1024) != 100 || + dep.Spec.Template.Spec.Containers[0].Resources.Limits.Cpu().MilliValue() != 1000 || + dep.Spec.Template.Spec.Containers[0].Resources.Limits.Memory().Value()/(1024*1024) != 1000 { + t.Error("some or all existing deployment resources were not preserved") + } + }) +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/params.go b/control-plane-operator/controllers/hostedcontrolplane/olm/params.go index eb4c9363de..83419ee27d 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/olm/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/params.go @@ -66,6 +66,10 @@ func NewOperatorLifecycleManagerParams(hcp *hyperv1.HostedControlPlane, releaseI params.PackageServerConfig.SetRestartAnnotation(hcp.ObjectMeta) params.PackageServerConfig.SetDefaultSecurityContext = 
setDefaultSecurityContext + if hcp.Spec.Platform.Type == hyperv1.IBMCloudPlatform && hcp.Spec.ControllerAvailabilityPolicy == hyperv1.HighlyAvailable { + params.PackageServerConfig.Replicas = 2 + } + if hcp.Spec.OLMCatalogPlacement == "management" { params.NoProxy = append(params.NoProxy, "certified-operators", "community-operators", "redhat-operators", "redhat-marketplace") } diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/services.go b/control-plane-operator/controllers/hostedcontrolplane/olm/services.go new file mode 100644 index 0000000000..fe22358064 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/services.go @@ -0,0 +1,38 @@ +package olm + +import ( + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/support/config" + corev1 "k8s.io/api/core/v1" +) + +type OLMService struct { + Name string + Manifest *corev1.Service + Reconciler func(*corev1.Service, config.OwnerRef) error +} + +func OLMServices(hcpNamespace string) []OLMService { + return []OLMService{ + { + Name: "certifiedOperatorsService", + Manifest: manifests.CertifiedOperatorsService(hcpNamespace), + Reconciler: ReconcileCertifiedOperatorsService, + }, + { + Name: "communityOperatorsService", + Manifest: manifests.CommunityOperatorsService(hcpNamespace), + Reconciler: ReconcileCommunityOperatorsService, + }, + { + Name: "marketplaceOperatorsService", + Manifest: manifests.RedHatMarketplaceOperatorsService(hcpNamespace), + Reconciler: ReconcileRedHatMarketplaceOperatorsService, + }, + { + Name: "redHatOperatorsService", + Manifest: manifests.RedHatOperatorsService(hcpNamespace), + Reconciler: ReconcileRedHatOperatorsService, + }, + } +} diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/testdata/zz_fixture_TestReconcileCatalogOperatorDeployment_Preserve_existing_resources.yaml 
b/control-plane-operator/controllers/hostedcontrolplane/olm/testdata/zz_fixture_TestReconcileCatalogOperatorDeployment_Preserve_existing_resources.yaml new file mode 100644 index 0000000000..9cef082572 --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/testdata/zz_fixture_TestReconcileCatalogOperatorDeployment_Preserve_existing_resources.yaml @@ -0,0 +1,148 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/managed-by: control-plane-operator +spec: + replicas: 0 + revisionHistoryLimit: 0 + selector: + matchLabels: + app: catalog-operator + strategy: + type: RollingUpdate + template: + metadata: + annotations: + alpha.image.policy.openshift.io/resolve-names: '*' + creationTimestamp: null + labels: + app: catalog-operator + hypershift.openshift.io/control-plane-component: catalog-operator + spec: + automountServiceAccountToken: false + containers: + - args: + - --namespace + - openshift-marketplace + - --configmapServerImage=$(OPERATOR_REGISTRY_IMAGE) + - --opmImage=$(OPERATOR_REGISTRY_IMAGE) + - --util-image + - $(OLM_OPERATOR_IMAGE) + - --writeStatusName + - operator-lifecycle-manager-catalog + - --tls-cert + - /srv-cert/tls.crt + - --tls-key + - /srv-cert/tls.key + - --client-ca + - /client-ca/ca.crt + - --kubeconfig + - /etc/openshift/kubeconfig/kubeconfig + command: + - /bin/catalog + env: + - name: RELEASE_VERSION + - name: KUBECONFIG + value: /etc/openshift/kubeconfig/kubeconfig + - name: OLM_OPERATOR_IMAGE + - name: OPERATOR_REGISTRY_IMAGE + - name: GRPC_PROXY + value: socks5://127.0.0.1:8090 + - name: NO_PROXY + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 60 + name: catalog-operator + ports: + - containerPort: 8443 + name: metrics + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + resources: + limits: + cpu: "1" + memory: 1000Mi + requests: + cpu: 500m + memory: 
500Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /srv-cert + name: srv-cert + readOnly: true + - mountPath: /client-ca + name: profile-collector + readOnly: true + - mountPath: /etc/openshift/kubeconfig + name: kubeconfig + readOnly: true + - args: + - run + command: + - /usr/bin/control-plane-operator + - konnectivity-socks5-proxy + env: + - name: KUBECONFIG + value: /etc/openshift/kubeconfig/kubeconfig + imagePullPolicy: IfNotPresent + name: socks5-proxy + ports: + - containerPort: 8090 + resources: + requests: + cpu: 10m + memory: 15Mi + volumeMounts: + - mountPath: /etc/konnectivity/proxy-client + name: oas-konnectivity-proxy-cert + readOnly: true + - mountPath: /etc/konnectivity/proxy-ca + name: oas-konnectivity-proxy-ca + readOnly: true + - mountPath: /etc/openshift/kubeconfig + name: kubeconfig + readOnly: true + initContainers: + - command: + - /usr/bin/control-plane-operator + - availability-prober + - --target + - https://kube-apiserver:6443/readyz + - --kubeconfig=/var/kubeconfig/kubeconfig + - --required-api=operators.coreos.com,v1alpha1,CatalogSource + imagePullPolicy: IfNotPresent + name: availability-prober + resources: {} + volumeMounts: + - mountPath: /var/kubeconfig + name: kubeconfig + volumes: + - name: srv-cert + secret: + defaultMode: 416 + secretName: catalog-operator-serving-cert + - name: profile-collector + secret: + defaultMode: 416 + secretName: metrics-client + - name: kubeconfig + secret: + defaultMode: 416 + secretName: service-network-admin-kubeconfig + - name: oas-konnectivity-proxy-cert + secret: + defaultMode: 416 + secretName: konnectivity-client + - configMap: + name: konnectivity-ca-bundle + name: oas-konnectivity-proxy-ca +status: {} diff --git a/control-plane-operator/controllers/hostedcontrolplane/olm/testdata/zz_fixture_TestReconcileOLMOperatorDeployment_Preserve_existing_resources.yaml 
b/control-plane-operator/controllers/hostedcontrolplane/olm/testdata/zz_fixture_TestReconcileOLMOperatorDeployment_Preserve_existing_resources.yaml new file mode 100644 index 0000000000..3a09b5218e --- /dev/null +++ b/control-plane-operator/controllers/hostedcontrolplane/olm/testdata/zz_fixture_TestReconcileOLMOperatorDeployment_Preserve_existing_resources.yaml @@ -0,0 +1,149 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/managed-by: control-plane-operator +spec: + replicas: 0 + revisionHistoryLimit: 0 + selector: + matchLabels: + app: olm-operator + strategy: + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + app: olm-operator + hypershift.openshift.io/control-plane-component: olm-operator + spec: + automountServiceAccountToken: false + containers: + - args: + - --namespace + - $(OPERATOR_NAMESPACE) + - --writeStatusName + - operator-lifecycle-manager + - --writePackageServerStatusName= + - --tls-cert + - /srv-cert/tls.crt + - --tls-key + - /srv-cert/tls.key + - --client-ca + - /client-ca/ca.crt + command: + - /bin/olm + env: + - name: OPERATOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OPERATOR_NAME + value: olm-operator + - name: RELEASE_VERSION + - name: KUBECONFIG + value: /etc/openshift/kubeconfig/kubeconfig + - name: GRPC_PROXY + value: socks5://127.0.0.1:8090 + - name: NO_PROXY + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 60 + name: olm-operator + ports: + - containerPort: 8443 + name: metrics + readinessProbe: + httpGet: + path: /healthz + port: 8443 + scheme: HTTPS + resources: + limits: + cpu: "1" + memory: 1000Mi + requests: + cpu: 500m + memory: 500Mi + terminationMessagePolicy: FallbackToLogsOnError + volumeMounts: + - mountPath: /srv-cert + name: srv-cert + readOnly: true + - mountPath: /client-ca + name: client-ca + readOnly: true + - 
mountPath: /etc/openshift/kubeconfig + name: kubeconfig + readOnly: true + - args: + - run + command: + - /usr/bin/control-plane-operator + - konnectivity-socks5-proxy + env: + - name: KUBECONFIG + value: /etc/openshift/kubeconfig/kubeconfig + imagePullPolicy: IfNotPresent + name: socks5-proxy + ports: + - containerPort: 8090 + resources: + requests: + cpu: 10m + memory: 15Mi + volumeMounts: + - mountPath: /etc/konnectivity/proxy-client + name: oas-konnectivity-proxy-cert + readOnly: true + - mountPath: /etc/konnectivity/proxy-ca + name: oas-konnectivity-proxy-ca + readOnly: true + - mountPath: /etc/openshift/kubeconfig + name: kubeconfig + readOnly: true + initContainers: + - command: + - /usr/bin/control-plane-operator + - availability-prober + - --target + - https://kube-apiserver:6443/readyz + - --kubeconfig=/var/kubeconfig/kubeconfig + - --required-api=operators.coreos.com,v1alpha1,CatalogSource + - --required-api=operators.coreos.com,v1alpha1,Subscription + - --required-api=operators.coreos.com,v2,OperatorCondition + - --required-api=operators.coreos.com,v1,OperatorGroup + - --required-api=operators.coreos.com,v1,OLMConfig + imagePullPolicy: IfNotPresent + name: availability-prober + resources: {} + volumeMounts: + - mountPath: /var/kubeconfig + name: kubeconfig + volumes: + - name: srv-cert + secret: + defaultMode: 416 + secretName: olm-operator-serving-cert + - name: client-ca + secret: + defaultMode: 416 + secretName: metrics-client + - name: kubeconfig + secret: + defaultMode: 416 + secretName: service-network-admin-kubeconfig + - name: oas-konnectivity-proxy-cert + secret: + defaultMode: 416 + secretName: konnectivity-client + - configMap: + name: konnectivity-ca-bundle + name: oas-konnectivity-proxy-ca +status: {} diff --git a/control-plane-operator/controllers/hostedcontrolplane/pki/kas.go b/control-plane-operator/controllers/hostedcontrolplane/pki/kas.go index 0dbad28abc..4d30bf3fa1 100644 --- 
a/control-plane-operator/controllers/hostedcontrolplane/pki/kas.go +++ b/control-plane-operator/controllers/hostedcontrolplane/pki/kas.go @@ -94,8 +94,7 @@ func ReconcileServiceAccountKubeconfig(secret, csrSigner *corev1.Secret, ca *cor if err := reconcileSignedCert(secret, csrSigner, config.OwnerRef{}, cn, serviceaccount.MakeGroupNames(serviceAccountNamespace), X509UsageClientAuth); err != nil { return fmt.Errorf("failed to reconcile serviceaccount client cert: %w", err) } - svcURL := inClusterKASURL() - + svcURL := inClusterKASURL(hcp.Spec.Platform.Type) return ReconcileKubeConfig(secret, secret, ca, svcURL, "", manifests.KubeconfigScopeLocal, config.OwnerRef{}) } @@ -153,7 +152,10 @@ func generateKubeConfig(url string, crtBytes, keyBytes, caBytes []byte) ([]byte, return clientcmd.Write(kubeCfg) } -func inClusterKASURL() string { +func inClusterKASURL(platformType hyperv1.PlatformType) string { + if platformType == hyperv1.IBMCloudPlatform { + return fmt.Sprintf("https://%s:%d", manifests.KubeAPIServerServiceName, config.KASSVCIBMCloudPort) + } return fmt.Sprintf("https://%s:%d", manifests.KubeAPIServerServiceName, config.KASSVCPort) } diff --git a/control-plane-operator/controllers/hostedcontrolplane/pkioperator/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/pkioperator/reconcile.go index 44639d1b5e..79d2a17858 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/pkioperator/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/pkioperator/reconcile.go @@ -82,8 +82,11 @@ func ReconcileDeployment( }, }, Command: []string{"/usr/bin/control-plane-pki-operator"}, - Args: []string{"operator"}, - Ports: []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 8443}}, + Args: []string{ + "operator", + "--namespace", deployment.Namespace, + }, + Ports: []corev1.ContainerPort{{Name: "metrics", Protocol: "TCP", ContainerPort: 8443}}, }, }, }, @@ -160,6 +163,21 @@ func ReconcileRole(role *rbacv1.Role, 
ownerRef config.OwnerRef) error { Resources: []string{"leases"}, Verbs: []string{"get", "list", "watch", "create", "delete", "update", "patch"}, }, + { // to approve certificate signing requests + APIGroups: []string{"certificates.hypershift.openshift.io"}, + Resources: []string{"certificatesigningrequestapprovals"}, + Verbs: []string{"get", "list", "watch"}, + }, + { // for certificate revocation + APIGroups: []string{"certificates.hypershift.openshift.io"}, + Resources: []string{"certificaterevocationrequests"}, + Verbs: []string{"get", "list", "watch"}, + }, + { // for certificate revocation + APIGroups: []string{"certificates.hypershift.openshift.io"}, + Resources: []string{"certificaterevocationrequests/status"}, + Verbs: []string{"patch"}, + }, } return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/pkioperator/testdata/zz_fixture_TestReconcileControlPlanePKIOperatorDeployment.yaml b/control-plane-operator/controllers/hostedcontrolplane/pkioperator/testdata/zz_fixture_TestReconcileControlPlanePKIOperatorDeployment.yaml index 15ec3df454..844e6ee2ec 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/pkioperator/testdata/zz_fixture_TestReconcileControlPlanePKIOperatorDeployment.yaml +++ b/control-plane-operator/controllers/hostedcontrolplane/pkioperator/testdata/zz_fixture_TestReconcileControlPlanePKIOperatorDeployment.yaml @@ -15,6 +15,7 @@ metadata: uid: test-uid spec: replicas: 1 + revisionHistoryLimit: 2 selector: matchLabels: name: control-plane-pki-operator @@ -60,6 +61,8 @@ spec: containers: - args: - operator + - --namespace + - test-namespace command: - /usr/bin/control-plane-pki-operator env: diff --git a/control-plane-operator/controllers/hostedcontrolplane/pkioperator/testdata/zz_fixture_TestReconcileControlPlanePKIOperatorRole.yaml b/control-plane-operator/controllers/hostedcontrolplane/pkioperator/testdata/zz_fixture_TestReconcileControlPlanePKIOperatorRole.yaml index f545ebcd9a..62307daec6 100644 --- 
a/control-plane-operator/controllers/hostedcontrolplane/pkioperator/testdata/zz_fixture_TestReconcileControlPlanePKIOperatorRole.yaml +++ b/control-plane-operator/controllers/hostedcontrolplane/pkioperator/testdata/zz_fixture_TestReconcileControlPlanePKIOperatorRole.yaml @@ -62,3 +62,25 @@ rules: - delete - update - patch +- apiGroups: + - certificates.hypershift.openshift.io + resources: + - certificatesigningrequestapprovals + verbs: + - get + - list + - watch +- apiGroups: + - certificates.hypershift.openshift.io + resources: + - certificaterevocationrequests + verbs: + - get + - list + - watch +- apiGroups: + - certificates.hypershift.openshift.io + resources: + - certificaterevocationrequests/status + verbs: + - patch diff --git a/control-plane-operator/controllers/hostedcontrolplane/registryoperator/reconcile.go b/control-plane-operator/controllers/hostedcontrolplane/registryoperator/reconcile.go index 44c70d735a..d5cea47f26 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/registryoperator/reconcile.go +++ b/control-plane-operator/controllers/hostedcontrolplane/registryoperator/reconcile.go @@ -113,38 +113,6 @@ func NewParams(hcp *hyperv1.HostedControlPlane, version string, releaseImageProv PriorityClass: config.DefaultPriorityClass, }, SetDefaultSecurityContext: setDefaultSecurityContext, - ReadinessProbes: config.ReadinessProbes{ - containerMain().Name: { - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/metrics", - Port: intstr.FromInt(metricsPort), - Scheme: corev1.URISchemeHTTPS, - }, - }, - InitialDelaySeconds: 15, - PeriodSeconds: 60, - SuccessThreshold: 1, - FailureThreshold: 3, - TimeoutSeconds: 5, - }, - }, - LivenessProbes: config.LivenessProbes{ - containerMain().Name: { - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Path: "/metrics", - Port: intstr.FromInt(metricsPort), - Scheme: corev1.URISchemeHTTPS, - }, - }, - InitialDelaySeconds: 60, - PeriodSeconds: 60, - 
SuccessThreshold: 1, - FailureThreshold: 5, - TimeoutSeconds: 5, - }, - }, Resources: config.ResourcesSpec{ containerMain().Name: { Requests: corev1.ResourceList{ diff --git a/control-plane-operator/controllers/hostedcontrolplane/registryoperator/testdata/zz_fixture_TestReconcileDeployment.yaml b/control-plane-operator/controllers/hostedcontrolplane/registryoperator/testdata/zz_fixture_TestReconcileDeployment.yaml index 76c82412bb..b25b39b5e9 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/registryoperator/testdata/zz_fixture_TestReconcileDeployment.yaml +++ b/control-plane-operator/controllers/hostedcontrolplane/registryoperator/testdata/zz_fixture_TestReconcileDeployment.yaml @@ -8,6 +8,7 @@ metadata: namespace: test-namespace spec: replicas: 1 + revisionHistoryLimit: 2 selector: matchLabels: name: cluster-image-registry-operator @@ -87,30 +88,10 @@ spec: - name: AZURE_ENVIRONMENT_FILEPATH value: /tmp/azurestackcloud.json image: quay.io/openshift/cluster-image-registry-operator:latest - livenessProbe: - failureThreshold: 5 - httpGet: - path: /metrics - port: 60000 - scheme: HTTPS - initialDelaySeconds: 60 - periodSeconds: 60 - successThreshold: 1 - timeoutSeconds: 5 name: cluster-image-registry-operator ports: - containerPort: 60000 name: metrics - readinessProbe: - failureThreshold: 3 - httpGet: - path: /metrics - port: 60000 - scheme: HTTPS - initialDelaySeconds: 15 - periodSeconds: 60 - successThreshold: 1 - timeoutSeconds: 5 resources: requests: cpu: 10m diff --git a/control-plane-operator/controllers/hostedcontrolplane/routecm/config_test.go b/control-plane-operator/controllers/hostedcontrolplane/routecm/config_test.go index f276e53556..1eb98b094c 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/routecm/config_test.go +++ b/control-plane-operator/controllers/hostedcontrolplane/routecm/config_test.go @@ -33,7 +33,7 @@ func TestReconcileOpenShiftRouteControllerManagerConfig(t *testing.T) { } imageProvider := 
imageprovider.NewFromImages(images) - params := NewOpenShiftRouteControllerManagerParams(hcp, nil, imageProvider, true) + params := NewOpenShiftRouteControllerManagerParams(hcp, imageProvider, true) configMap := manifests.OpenShiftRouteControllerManagerConfig(hcp.Namespace) networkConfig := &v1.NetworkSpec{ diff --git a/control-plane-operator/controllers/hostedcontrolplane/routecm/params.go b/control-plane-operator/controllers/hostedcontrolplane/routecm/params.go index 2328f7c996..483cf5e2c7 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/routecm/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/routecm/params.go @@ -9,7 +9,6 @@ import ( hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/imageprovider" "github.com/openshift/hypershift/support/config" - "github.com/openshift/hypershift/support/globalconfig" ) type OpenShiftRouteControllerManagerParams struct { @@ -21,7 +20,7 @@ type OpenShiftRouteControllerManagerParams struct { config.OwnerRef } -func NewOpenShiftRouteControllerManagerParams(hcp *hyperv1.HostedControlPlane, observedConfig *globalconfig.ObservedConfig, releaseImageProvider *imageprovider.ReleaseImageProvider, setDefaultSecurityContext bool) *OpenShiftRouteControllerManagerParams { +func NewOpenShiftRouteControllerManagerParams(hcp *hyperv1.HostedControlPlane, releaseImageProvider *imageprovider.ReleaseImageProvider, setDefaultSecurityContext bool) *OpenShiftRouteControllerManagerParams { params := &OpenShiftRouteControllerManagerParams{ OpenShiftControllerManagerImage: releaseImageProvider.GetImage("route-controller-manager"), } diff --git a/control-plane-operator/controllers/hostedcontrolplane/scheduler/deployment.go b/control-plane-operator/controllers/hostedcontrolplane/scheduler/deployment.go index da7ae018d2..1fb183f36c 100644 --- 
a/control-plane-operator/controllers/hostedcontrolplane/scheduler/deployment.go +++ b/control-plane-operator/controllers/hostedcontrolplane/scheduler/deployment.go @@ -47,7 +47,7 @@ var ( } ) -func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, config config.DeploymentConfig, image string, featureGates []string, policy configv1.ConfigMapNameReference, availabilityProberImage string, ciphers []string, tlsVersion string, disableProfiling bool, schedulerConfig *corev1.ConfigMap) error { +func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef, config config.DeploymentConfig, image string, featureGates []string, policy configv1.ConfigMapNameReference, availabilityProberImage string, ciphers []string, tlsVersion string, disableProfiling bool, schedulerConfig *corev1.ConfigMap, platformType hyperv1.PlatformType) error { ownerRef.ApplyTo(deployment) // preserve existing resource requirements for main scheduler container @@ -94,7 +94,7 @@ func ReconcileDeployment(deployment *appsv1.Deployment, ownerRef config.OwnerRef }, } config.ApplyTo(deployment) - util.AvailabilityProber(kas.InClusterKASReadyURL(), availabilityProberImage, &deployment.Spec.Template.Spec) + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), availabilityProberImage, &deployment.Spec.Template.Spec) return nil } diff --git a/control-plane-operator/controllers/hostedcontrolplane/scheduler/params.go b/control-plane-operator/controllers/hostedcontrolplane/scheduler/params.go index f1c4c45582..b48a7458dd 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/scheduler/params.go +++ b/control-plane-operator/controllers/hostedcontrolplane/scheduler/params.go @@ -66,22 +66,6 @@ func NewKubeSchedulerParams(ctx context.Context, hcp *hyperv1.HostedControlPlane TimeoutSeconds: 5, }, } - params.ReadinessProbes = config.ReadinessProbes{ - schedulerContainerMain().Name: { - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ 
- Path: "/healthz", - Port: intstr.FromInt(schedulerSecurePort), - Scheme: corev1.URISchemeHTTPS, - }, - }, - InitialDelaySeconds: 15, - PeriodSeconds: 60, - SuccessThreshold: 1, - FailureThreshold: 3, - TimeoutSeconds: 5, - }, - } params.DeploymentConfig.SetDefaults(hcp, labels, nil) params.DeploymentConfig.SetRestartAnnotation(hcp.ObjectMeta) params.SetDefaultSecurityContext = setDefaultSecurityContext diff --git a/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/operator.go b/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/operator.go index fc6881f1b4..d9b2349471 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/operator.go +++ b/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/operator.go @@ -1,6 +1,7 @@ package snapshotcontroller import ( + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/assets" @@ -21,7 +22,8 @@ var ( func ReconcileOperatorDeployment( deployment *appsv1.Deployment, - params *Params) error { + params *Params, + platformType hyperv1.PlatformType) error { params.OwnerRef.ApplyTo(deployment) deployment.Spec = operatorDeployment.DeepCopy().Spec @@ -45,7 +47,7 @@ func ReconcileOperatorDeployment( } } params.DeploymentConfig.ApplyTo(deployment) - util.AvailabilityProber(kas.InClusterKASReadyURL(), params.AvailabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), params.AvailabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { o.KubeconfigVolumeName = "guest-kubeconfig" o.RequiredAPIs = 
[]schema.GroupVersionKind{ {Group: "operator.openshift.io", Version: "v1", Kind: "CSISnapshotController"}, diff --git a/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/operator_test.go b/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/operator_test.go index 21c8333802..1f243c9dc4 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/operator_test.go +++ b/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/operator_test.go @@ -35,7 +35,7 @@ func TestReconcileOperatorDeployment(t *testing.T) { deployment := manifests.CSISnapshotControllerOperatorDeployment("test-namespace") imageProvider := imageprovider.NewFromImages(images) params := NewParams(hcp, "1.0.0", imageProvider, true) - if err := ReconcileOperatorDeployment(deployment, params); err != nil { + if err := ReconcileOperatorDeployment(deployment, params, hyperv1.NonePlatform); err != nil { t.Fatalf("unexpected error: %v", err) } deploymentYaml, err := util.SerializeResource(deployment, api.Scheme) diff --git a/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/testdata/zz_fixture_TestReconcileOperatorDeployment.yaml b/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/testdata/zz_fixture_TestReconcileOperatorDeployment.yaml index 8e7ff4c1b4..c074b6da21 100644 --- a/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/testdata/zz_fixture_TestReconcileOperatorDeployment.yaml +++ b/control-plane-operator/controllers/hostedcontrolplane/snapshotcontroller/testdata/zz_fixture_TestReconcileOperatorDeployment.yaml @@ -15,6 +15,7 @@ metadata: uid: "" spec: replicas: 1 + revisionHistoryLimit: 2 selector: matchLabels: app: csi-snapshot-controller-operator diff --git a/control-plane-operator/controllers/hostedcontrolplane/storage/operator.go b/control-plane-operator/controllers/hostedcontrolplane/storage/operator.go index ec44375896..03f559846d 100644 --- 
a/control-plane-operator/controllers/hostedcontrolplane/storage/operator.go +++ b/control-plane-operator/controllers/hostedcontrolplane/storage/operator.go @@ -1,6 +1,7 @@ package storage import ( + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/common" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/kas" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/storage/assets" @@ -20,7 +21,8 @@ var ( func ReconcileOperatorDeployment( deployment *appsv1.Deployment, - params *Params) error { + params *Params, + platformType hyperv1.PlatformType) error { params.OwnerRef.ApplyTo(deployment) deployment.Spec = operatorDeployment.DeepCopy().Spec @@ -33,7 +35,7 @@ func ReconcileOperatorDeployment( } params.DeploymentConfig.ApplyTo(deployment) - util.AvailabilityProber(kas.InClusterKASReadyURL(), params.AvailabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { + util.AvailabilityProber(kas.InClusterKASReadyURL(platformType), params.AvailabilityProberImage, &deployment.Spec.Template.Spec, func(o *util.AvailabilityProberOpts) { o.KubeconfigVolumeName = "guest-kubeconfig" o.RequiredAPIs = []schema.GroupVersionKind{ {Group: "operator.openshift.io", Version: "v1", Kind: "Storage"}, diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/hcpstatus/hcpstatus.go b/control-plane-operator/hostedclusterconfigoperator/controllers/hcpstatus/hcpstatus.go index f2ea722faa..5626f0b782 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/hcpstatus/hcpstatus.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/hcpstatus/hcpstatus.go @@ -113,11 +113,11 @@ func (h *hcpStatusReconciler) reconcile(ctx context.Context, hcp *hyperv1.Hosted hyperv1.ClusterVersionAvailable: 
findClusterOperatorStatusCondition(clusterVersion.Status.Conditions, configv1.OperatorAvailable), } - for conditionType := range cvoConditions { + for conditionType, condition := range cvoConditions { var hcpCVOCondition metav1.Condition // Set unknown status. var unknownStatusMessage string - if cvoConditions[conditionType] == nil { + if condition == nil { unknownStatusMessage = "Condition not found in the CVO." } if err != nil { @@ -132,9 +132,9 @@ func (h *hcpStatusReconciler) reconcile(ctx context.Context, hcp *hyperv1.Hosted ObservedGeneration: hcp.Generation, } - if err == nil && cvoConditions[conditionType] != nil { + if err == nil && condition != nil { // Bubble up info from CVO. - reason := cvoConditions[conditionType].Reason + reason := condition.Reason // reason is not required in ClusterOperatorStatusCondition, but it's in metav1.conditions. // So we need to make sure the input does not break the KAS expectation. if reason == "" { @@ -142,13 +142,20 @@ func (h *hcpStatusReconciler) reconcile(ctx context.Context, hcp *hyperv1.Hosted } hcpCVOCondition = metav1.Condition{ Type: string(conditionType), - Status: metav1.ConditionStatus(cvoConditions[conditionType].Status), + Status: metav1.ConditionStatus(condition.Status), Reason: reason, - Message: cvoConditions[conditionType].Message, + Message: condition.Message, ObservedGeneration: hcp.Generation, } } + // If CVO has no Upgradeable condition, consider the HCP upgradable according to the CVO + if conditionType == hyperv1.ClusterVersionUpgradeable && + condition == nil { + hcpCVOCondition.Status = metav1.ConditionTrue + hcpCVOCondition.Reason = hyperv1.FromClusterVersionReason + } + meta.SetStatusCondition(&hcp.Status.Conditions, hcpCVOCondition) } log.Info("Finished reconciling hosted cluster version conditions") diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts/assets/podsecurity-violation.yaml 
b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts/assets/podsecurity-violation.yaml new file mode 100644 index 0000000000..d270a3728a --- /dev/null +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts/assets/podsecurity-violation.yaml @@ -0,0 +1,40 @@ +# Source: https://raw.githubusercontent.com/openshift/cluster-kube-apiserver-operator/release-4.15/bindata/assets/alerts/podsecurity-violations.yaml +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: podsecurity + namespace: openshift-kube-apiserver +spec: + groups: + - name: pod-security-violation + rules: + - alert: PodSecurityViolation + annotations: + summary: One or more workloads users created in the cluster don't match their Pod Security profile + description: >- + A workload (pod, deployment, daemonset, ...) was created somewhere in the cluster but it + did not match the PodSecurity "{{ $labels.policy_level }}" profile defined by its namespace either via the cluster-wide + configuration (which triggers on a "restricted" profile violations) or by the namespace + local Pod Security labels. + Refer to Kubernetes documentation on Pod Security Admission to learn more about these + violations. + expr: | + sum(increase(pod_security_evaluations_total{decision="deny",mode="audit",resource="pod",ocp_namespace=""}[1d])) by (policy_level, ocp_namespace) > 0 + labels: + namespace: openshift-kube-apiserver + severity: info + - alert: PodSecurityViolation + annotations: + summary: One or more workloads in platform namespaces of the cluster don't match their Pod Security profile + description: >- + A workload (pod, deployment, daemonset, ...) 
was created in namespace "{{ $labels.ocp_namespace }}" but it + did not match the PodSecurity "{{ $labels.policy_level }}" profile defined by its namespace either via the cluster-wide + configuration (which triggers on a "restricted" profile violations) or by the namespace + local Pod Security labels. + Refer to Kubernetes documentation on Pod Security Admission to learn more about these + violations. + expr: | + sum(increase(pod_security_evaluations_total{decision="deny",mode="audit",resource="pod",ocp_namespace!=""}[1d])) by (policy_level, ocp_namespace) > 0 + labels: + namespace: openshift-kube-apiserver + severity: info diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts/reconcile.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts/reconcile.go index fc9fb6f746..436d1ad4cc 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts/reconcile.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts/reconcile.go @@ -12,8 +12,11 @@ import ( var ( //go:embed assets/apiusage.yaml apiUsageYaml []byte + //go:embed assets/podsecurity-violation.yaml + podSecurityViolationYaml []byte - apiUsage = createRule(apiUsageYaml) + apiUsage = createRule(apiUsageYaml) + podSecurityViolation = createRule(podSecurityViolationYaml) ) func ReconcileApiUsageRule(rule *prometheusoperatorv1.PrometheusRule) error { @@ -21,6 +24,11 @@ func ReconcileApiUsageRule(rule *prometheusoperatorv1.PrometheusRule) error { return nil } +func ReconcilePodSecurityViolationRule(rule *prometheusoperatorv1.PrometheusRule) error { + rule.Spec = podSecurityViolation.Spec + return nil +} + func createRule(content []byte) *prometheusoperatorv1.PrometheusRule { rule := &prometheusoperatorv1.PrometheusRule{} deserializeResource(content, rule) diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/cco/reconcile.go 
b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/cco/reconcile.go new file mode 100644 index 0000000000..a8ef999ae7 --- /dev/null +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/cco/reconcile.go @@ -0,0 +1,13 @@ +package cco + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +func ReconcileCloudCredentialConfig(cfg *operatorv1.CloudCredential) { + if cfg.Spec.ManagementState == "" { + cfg.Spec.ManagementState = operatorv1.Managed + } + + cfg.Spec.CredentialsMode = operatorv1.CloudCredentialsModeManual +} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/cco/reconcile_test.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/cco/reconcile_test.go new file mode 100644 index 0000000000..04c0ee6f63 --- /dev/null +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/cco/reconcile_test.go @@ -0,0 +1,41 @@ +package cco + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests" +) + +func TestReconcileCloudCredentialConfig(t *testing.T) { + testsCases := []struct { + name string + inputConfig *operatorv1.CloudCredential + expectedConfig *operatorv1.CloudCredential + }{ + { + name: "create", + inputConfig: manifests.CloudCredential(), + expectedConfig: &operatorv1.CloudCredential{ + ObjectMeta: manifests.CloudCredential().ObjectMeta, + Spec: operatorv1.CloudCredentialSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + CredentialsMode: operatorv1.CloudCredentialsModeManual, + }, + }, + }, + } + for _, tc := range testsCases { + t.Run(tc.name, func(t *testing.T) { + config := tc.inputConfig + ReconcileCloudCredentialConfig(config) + if diff := cmp.Diff(config, tc.expectedConfig); diff != "" { + 
t.Errorf("invalid reconciled config: %v", diff) + } + }) + } +} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/params.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/params.go index b88068bf12..a193818271 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/params.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/params.go @@ -2,17 +2,19 @@ package ingress import ( configv1 "github.com/openshift/api/config/v1" + v1 "github.com/openshift/api/operator/v1" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/support/globalconfig" ) type IngressParams struct { - IngressSubdomain string - Replicas int32 - PlatformType hyperv1.PlatformType - IsPrivate bool - IBMCloudUPI bool - AWSNLB bool + IngressSubdomain string + Replicas int32 + PlatformType hyperv1.PlatformType + IsPrivate bool + IBMCloudUPI bool + AWSNLB bool + LoadBalancerScope v1.LoadBalancerScope } func NewIngressParams(hcp *hyperv1.HostedControlPlane) *IngressParams { @@ -20,12 +22,16 @@ func NewIngressParams(hcp *hyperv1.HostedControlPlane) *IngressParams { isPrivate := false ibmCloudUPI := false nlb := false + loadBalancerScope := v1.ExternalLoadBalancer if hcp.Spec.Platform.IBMCloud != nil && hcp.Spec.Platform.IBMCloud.ProviderType == configv1.IBMCloudProviderTypeUPI { ibmCloudUPI = true } if hcp.Annotations[hyperv1.PrivateIngressControllerAnnotation] == "true" { isPrivate = true } + if hcp.Annotations[hyperv1.IngressControllerLoadBalancerScope] == string(v1.InternalLoadBalancer) { + loadBalancerScope = v1.InternalLoadBalancer + } if hcp.Spec.InfrastructureAvailabilityPolicy == hyperv1.HighlyAvailable { replicas = 2 } @@ -34,12 +40,12 @@ func NewIngressParams(hcp *hyperv1.HostedControlPlane) *IngressParams { } return &IngressParams{ - IngressSubdomain: globalconfig.IngressDomain(hcp), - Replicas: 
replicas, - PlatformType: hcp.Spec.Platform.Type, - IsPrivate: isPrivate, - IBMCloudUPI: ibmCloudUPI, - AWSNLB: nlb, + IngressSubdomain: globalconfig.IngressDomain(hcp), + Replicas: replicas, + PlatformType: hcp.Spec.Platform.Type, + IsPrivate: isPrivate, + IBMCloudUPI: ibmCloudUPI, + AWSNLB: nlb, + LoadBalancerScope: loadBalancerScope, } - } diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile.go index 227bd099e5..a718b298c8 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile.go @@ -12,7 +12,7 @@ import ( "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests" ) -func ReconcileDefaultIngressController(ingressController *operatorv1.IngressController, ingressSubdomain string, platformType hyperv1.PlatformType, replicas int32, isIBMCloudUPI bool, isPrivate bool, useNLB bool) error { +func ReconcileDefaultIngressController(ingressController *operatorv1.IngressController, ingressSubdomain string, platformType hyperv1.PlatformType, replicas int32, isIBMCloudUPI bool, isPrivate bool, useNLB bool, loadBalancerScope operatorv1.LoadBalancerScope) error { // If ingress controller already exists, skip reconciliation to allow day-2 configuration if ingressController.ResourceVersion != "" { return nil @@ -43,7 +43,7 @@ func ReconcileDefaultIngressController(ingressController *operatorv1.IngressCont case hyperv1.AWSPlatform: if useNLB { ingressController.Spec.EndpointPublishingStrategy.LoadBalancer = &operatorv1.LoadBalancerStrategy{ - Scope: operatorv1.ExternalLoadBalancer, + Scope: loadBalancerScope, ProviderParameters: &operatorv1.ProviderLoadBalancerParameters{ Type: operatorv1.AWSLoadBalancerProvider, AWS: 
&operatorv1.AWSLoadBalancerParameters{ @@ -68,7 +68,7 @@ func ReconcileDefaultIngressController(ingressController *operatorv1.IngressCont ingressController.Spec.EndpointPublishingStrategy = &operatorv1.EndpointPublishingStrategy{ Type: operatorv1.LoadBalancerServiceStrategyType, LoadBalancer: &operatorv1.LoadBalancerStrategy{ - Scope: operatorv1.ExternalLoadBalancer, + Scope: loadBalancerScope, }, } } diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile_test.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile_test.go index 3894f4f79a..cbeccab91e 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile_test.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress/reconcile_test.go @@ -23,6 +23,7 @@ func TestReconcileDefaultIngressController(t *testing.T) { inputIsIBMCloudUPI bool inputIsPrivate bool inputIsNLB bool + inputLoadBalancerScope operatorv1.LoadBalancerScope expectedIngressController *operatorv1.IngressController }{ { @@ -56,13 +57,14 @@ func TestReconcileDefaultIngressController(t *testing.T) { }, }, { - name: "IBM Cloud Non-UPI uses LoadBalancer publishing strategy", + name: "IBM Cloud Non-UPI uses LoadBalancer publishing strategy (External)", inputIngressController: manifests.IngressDefaultIngressController(), inputIngressDomain: fakeIngressDomain, inputPlatformType: hyperv1.IBMCloudPlatform, inputReplicas: fakeInputReplicas, inputIsIBMCloudUPI: false, inputIsPrivate: false, + inputLoadBalancerScope: operatorv1.ExternalLoadBalancer, expectedIngressController: &operatorv1.IngressController{ ObjectMeta: manifests.IngressDefaultIngressController().ObjectMeta, Spec: operatorv1.IngressControllerSpec{ @@ -85,6 +87,37 @@ func TestReconcileDefaultIngressController(t *testing.T) { }, }, }, + { + name: "IBM Cloud Non-UPI uses LoadBalancer publishing strategy (Internal)", + 
inputIngressController: manifests.IngressDefaultIngressController(), + inputIngressDomain: fakeIngressDomain, + inputPlatformType: hyperv1.IBMCloudPlatform, + inputReplicas: fakeInputReplicas, + inputIsIBMCloudUPI: false, + inputIsPrivate: false, + inputLoadBalancerScope: operatorv1.InternalLoadBalancer, + expectedIngressController: &operatorv1.IngressController{ + ObjectMeta: manifests.IngressDefaultIngressController().ObjectMeta, + Spec: operatorv1.IngressControllerSpec{ + Domain: fakeIngressDomain, + Replicas: &fakeInputReplicas, + EndpointPublishingStrategy: &operatorv1.EndpointPublishingStrategy{ + Type: operatorv1.LoadBalancerServiceStrategyType, + LoadBalancer: &operatorv1.LoadBalancerStrategy{ + Scope: operatorv1.InternalLoadBalancer, + }, + }, + NodePlacement: &operatorv1.NodePlacement{ + Tolerations: []corev1.Toleration{ + { + Key: "dedicated", + Value: "edge", + }, + }, + }, + }, + }, + }, { name: "Kubevirt uses NodePort publishing strategy", inputIngressController: manifests.IngressDefaultIngressController(), @@ -228,6 +261,7 @@ func TestReconcileDefaultIngressController(t *testing.T) { inputIngressDomain: fakeIngressDomain, inputReplicas: fakeInputReplicas, inputIsNLB: true, + inputLoadBalancerScope: operatorv1.ExternalLoadBalancer, expectedIngressController: &operatorv1.IngressController{ ObjectMeta: manifests.IngressDefaultIngressController().ObjectMeta, Spec: operatorv1.IngressControllerSpec{ @@ -256,7 +290,7 @@ func TestReconcileDefaultIngressController(t *testing.T) { for _, tc := range testsCases { t.Run(tc.name, func(t *testing.T) { g := NewGomegaWithT(t) - err := ReconcileDefaultIngressController(tc.inputIngressController, tc.inputIngressDomain, tc.inputPlatformType, tc.inputReplicas, tc.inputIsIBMCloudUPI, tc.inputIsPrivate, tc.inputIsNLB) + err := ReconcileDefaultIngressController(tc.inputIngressController, tc.inputIngressDomain, tc.inputPlatformType, tc.inputReplicas, tc.inputIsIBMCloudUPI, tc.inputIsPrivate, tc.inputIsNLB, 
tc.inputLoadBalancerScope) g.Expect(err).To(BeNil()) g.Expect(tc.inputIngressController).To(BeEquivalentTo(tc.expectedIngressController)) }) diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/kas/reconcile.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/kas/reconcile.go new file mode 100644 index 0000000000..4221d736d4 --- /dev/null +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/kas/reconcile.go @@ -0,0 +1,49 @@ +package kas + +import ( + "github.com/openshift/hypershift/support/util" + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + "k8s.io/utils/ptr" +) + +func ReconcileKASEndpoints(endpoints *corev1.Endpoints, address string, port int32) { + if endpoints.Labels == nil { + endpoints.Labels = map[string]string{} + } + endpoints.Labels[discoveryv1.LabelSkipMirror] = "true" + endpoints.Subsets = []corev1.EndpointSubset{{ + Addresses: []corev1.EndpointAddress{{ + IP: address, + }}, + Ports: []corev1.EndpointPort{{ + Name: "https", + Port: port, + Protocol: corev1.ProtocolTCP, + }}, + }} +} + +func ReconcileKASEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, address string, port int32) { + if endpointSlice.Labels == nil { + endpointSlice.Labels = map[string]string{} + } + endpointSlice.Labels[discoveryv1.LabelServiceName] = "kubernetes" + ipv4, err := util.IsIPv4(address) + if err != nil || ipv4 { + endpointSlice.AddressType = discoveryv1.AddressTypeIPv4 + } else { + endpointSlice.AddressType = discoveryv1.AddressTypeIPv6 + } + endpointSlice.Endpoints = []discoveryv1.Endpoint{{ + Addresses: []string{ + address, + }, + Conditions: discoveryv1.EndpointConditions{Ready: ptr.To(true)}, + }} + endpointSlice.Ports = []discoveryv1.EndpointPort{{ + Name: ptr.To("https"), + Port: ptr.To(port), + Protocol: ptr.To(corev1.ProtocolTCP), + }} +} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/reconcile.go 
b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/reconcile.go index 934d1bfee5..1d7de73d18 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/reconcile.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity/reconcile.go @@ -30,7 +30,7 @@ var ( } ) -func ReconcileAgentDaemonSet(daemonset *appsv1.DaemonSet, deploymentConfig config.DeploymentConfig, image string, host string, port int32, platform hyperv1.PlatformType, proxy configv1.ProxyStatus) { +func ReconcileAgentDaemonSet(daemonset *appsv1.DaemonSet, deploymentConfig config.DeploymentConfig, image string, host string, port int32, platform hyperv1.PlatformSpec, proxy configv1.ProxyStatus) { var labels map[string]string if daemonset.Spec.Selector != nil && daemonset.Spec.Selector.MatchLabels != nil { labels = daemonset.Spec.Selector.MatchLabels @@ -49,6 +49,9 @@ func ReconcileAgentDaemonSet(daemonset *appsv1.DaemonSet, deploymentConfig confi Labels: labels, }, Spec: corev1.PodSpec{ + // Default is not the default, it means that the kubelets will re-use the hosts DNS resolver + DNSPolicy: corev1.DNSDefault, + HostNetwork: true, AutomountServiceAccountToken: pointer.Bool(false), SecurityContext: &corev1.PodSecurityContext{ RunAsUser: pointer.Int64(1000), @@ -63,10 +66,12 @@ func ReconcileAgentDaemonSet(daemonset *appsv1.DaemonSet, deploymentConfig confi }, }, } - if platform != hyperv1.IBMCloudPlatform { - daemonset.Spec.Template.Spec.HostNetwork = true - // Default is not the default, it means that the kubelets will re-use the hosts DNS resolver - daemonset.Spec.Template.Spec.DNSPolicy = corev1.DNSDefault + // IBMCloud requires the following settings + if platform.Type == hyperv1.IBMCloudPlatform { + daemonset.Spec.Template.Spec.HostNetwork = false + if platform.IBMCloud != nil && platform.IBMCloud.ProviderType == configv1.IBMCloudProviderTypeUPI { + daemonset.Spec.Template.Spec.DNSPolicy = 
corev1.DNSClusterFirst + } } deploymentConfig.ApplyToDaemonSet(daemonset) } @@ -117,11 +122,11 @@ func buildKonnectivityWorkerAgentContainer(image, host string, port int32, proxy "--keepalive-time", "30s", "--probe-interval", - "30s", + "5s", "--sync-interval", - "1m", + "5s", "--sync-interval-cap", - "5m", + "30s", "--v", "3", } diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/alerts.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/alerts.go index 97fb3fcbd8..e953fc0b29 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/alerts.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/alerts.go @@ -13,3 +13,12 @@ func ApiUsageRule() *prometheusoperatorv1.PrometheusRule { }, } } + +func PodSecurityViolationRule() *prometheusoperatorv1.PrometheusRule { + return &prometheusoperatorv1.PrometheusRule{ + ObjectMeta: metav1.ObjectMeta{ + Name: "podsecurity", + Namespace: "openshift-kube-apiserver", + }, + } +} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/cco.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/cco.go new file mode 100644 index 0000000000..b3883ab9dd --- /dev/null +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/cco.go @@ -0,0 +1,14 @@ +package manifests + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func CloudCredential() *operatorv1.CloudCredential { + return &operatorv1.CloudCredential{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + } +} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/config.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/config.go index 65757a4d9f..b201425d92 100644 --- 
a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/config.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/config.go @@ -13,12 +13,3 @@ func InstallConfigConfigMap() *corev1.ConfigMap { }, } } - -func APIServerEndpoints() *corev1.Endpoints { - return &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "kubernetes", - }, - } -} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/kas.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/kas.go new file mode 100644 index 0000000000..1116d587fe --- /dev/null +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/kas.go @@ -0,0 +1,25 @@ +package manifests + +import ( + corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func KASEndpoints() *corev1.Endpoints { + return &corev1.Endpoints{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubernetes", + Namespace: corev1.NamespaceDefault, + }, + } +} + +func KASEndpointSlice() *discoveryv1.EndpointSlice { + return &discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubernetes", + Namespace: corev1.NamespaceDefault, + }, + } +} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/olm.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/olm.go index be6ef37da1..93399c5eb3 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/olm.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/olm.go @@ -1,11 +1,11 @@ package manifests import ( + configv1 "github.com/openshift/api/config/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" - - operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" ) func CertifiedOperatorsCatalogSource() *operatorsv1alpha1.CatalogSource { @@ -78,3 +78,11 @@ func OLMPackageServerEndpoints() *corev1.Endpoints { }, } } + +func OperatorHub() *configv1.OperatorHub { + return &configv1.OperatorHub{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + } +} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/pki.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/pki.go index 285d2cd258..94fbdec28c 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/pki.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests/pki.go @@ -22,3 +22,12 @@ func UserCABundle() *corev1.ConfigMap { }, } } + +func ImageRegistryAdditionalTrustedCAConfigMap(name string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "openshift-config", + }, + } +} diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go index f159378b67..9e5ca403d1 100644 --- a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources.go @@ -9,9 +9,12 @@ import ( "sync" "time" + "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/api" + "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/cco" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" rbacv1 
"k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -36,6 +39,7 @@ import ( configv1 "github.com/openshift/api/config/v1" imageregistryv1 "github.com/openshift/api/imageregistry/v1" + openshiftcpv1 "github.com/openshift/api/openshiftcontrolplane/v1" operatorv1 "github.com/openshift/api/operator/v1" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" @@ -44,10 +48,12 @@ import ( kubevirtcsi "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/csi/kubevirt" "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/cvo" cpomanifests "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/ocm" alerts "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/alerts" ccm "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/cloudcontrollermanager/azure" "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/crd" "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/ingress" + "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/kas" "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/konnectivity" "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/kubeadminpassword" "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator/controllers/resources/manifests" @@ -190,7 +196,7 @@ func Setup(opts *operator.HostedClusterConfigOperatorConfig) error { &admissionregistrationv1.ValidatingWebhookConfiguration{}, 
&prometheusoperatorv1.PrometheusRule{}, &operatorv1.IngressController{}, - &imageregistryv1.Config{}, + &discoveryv1.EndpointSlice{}, } for _, r := range resourcesToWatch { if err := c.Watch(source.Kind(opts.Manager.GetCache(), r), eventHandler()); err != nil { @@ -201,14 +207,6 @@ func Setup(opts *operator.HostedClusterConfigOperatorConfig) error { return fmt.Errorf("failed to watch HostedControlPlane: %w", err) } - if err := c.Watch(source.Kind(opts.CPCluster.GetCache(), &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ - Name: manifests.PullSecret(opts.Namespace).Name, - Namespace: opts.Namespace, - }, - }), eventHandler()); err != nil { - return fmt.Errorf("failed to watch HCP pullsecret: %w", err) - } - return nil } @@ -253,23 +251,14 @@ func (r *reconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result errs = append(errs, fmt.Errorf("failed to reconcile crds: %w", err)) } - // We only keep reconciling the endpoint for existing clusters that are relying on this for nodes haproxy to work. - // Otherwise, changing the haproxy config to !=443 would result in a NodePool rollout which want to avoid for existing clusters. - // Existing clusters are given the *hcp.Spec.Networking.APIServer.Port == 443 semantic as we were enforcing this default previously, - // and it's a now a forbidden operation. 
- if hcp.Spec.Networking.APIServer != nil && hcp.Spec.Networking.APIServer.Port != nil && - *hcp.Spec.Networking.APIServer.Port == 443 { - log.Info("reconciling kubernetes.default endpoints") - endpoints := manifests.APIServerEndpoints() - if _, err := r.CreateOrUpdate(ctx, r.client, endpoints, func() error { - if len(endpoints.Subsets) == 0 || len(endpoints.Subsets[0].Ports) == 0 { - return nil - } - endpoints.Subsets[0].Ports[0].Port = 443 - return nil - }); err != nil { - errs = append(errs, fmt.Errorf("failed to reconcile kubernetes.default endpoints: %w", err)) - } + // Clusters with "none" as their Kubernetes API server endpoint reconciliation + // type must manually manage the Kubernetes endpoints and endpointslice resources. + // Due to recent Kubernetes changes, we need to reconcile these resources to avoid + // problems such as [1]. + // [1] https://github.com/kubernetes/kubernetes/issues/118777 + log.Info("reconciling kubernetes.default endpoints and endpointslice") + if err := r.reconcileKASEndpoints(ctx, hcp); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile kubernetes.default endpoints and endpointslice: %w", err)) } log.Info("reconciling install configmap") @@ -315,12 +304,61 @@ func (r *reconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result }); err != nil { errs = append(errs, fmt.Errorf("failed to reconcile imageregistry config: %w", err)) } + if registryConfig.Spec.ManagementState == operatorv1.Removed && r.platformType != hyperv1.IBMCloudPlatform { + log.Info("imageregistry operator managementstate is removed, disabling openshift-controller-manager controllers and cleaning up resources") + ocmConfigMap := cpomanifests.OpenShiftControllerManagerConfig(r.hcpNamespace) + if _, err := r.CreateOrUpdate(ctx, r.cpClient, ocmConfigMap, func() error { + if ocmConfigMap.Data == nil { + // CPO has not created the configmap yet, wait for create + // This should not happen as we are started by the CPO after the configmap 
should be created + return nil + } + config := &openshiftcpv1.OpenShiftControllerManagerConfig{} + if configStr, exists := ocmConfigMap.Data[ocm.ConfigKey]; exists && len(configStr) > 0 { + err := util.DeserializeResource(configStr, config, api.Scheme) + if err != nil { + return fmt.Errorf("unable to decode existing openshift controller manager configuration: %w", err) + } + } + config.Controllers = []string{"*", fmt.Sprintf("-%s", openshiftcpv1.OpenShiftServiceAccountPullSecretsController)} + configStr, err := util.SerializeResource(config, api.Scheme) + if err != nil { + return fmt.Errorf("failed to serialize openshift controller manager configuration: %w", err) + } + ocmConfigMap.Data[ocm.ConfigKey] = configStr + return nil + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile openshift-controller-manager config: %w", err)) + } + } + + if hcp.Spec.Configuration != nil && hcp.Spec.Configuration.Image != nil && hcp.Spec.Configuration.Image.AdditionalTrustedCA.Name != "" { + additionalTrustedCAName := hcp.Spec.Configuration.Image.AdditionalTrustedCA.Name + src := &corev1.ConfigMap{} + err := r.cpClient.Get(ctx, client.ObjectKey{Namespace: hcp.Namespace, Name: additionalTrustedCAName}, src) + if err != nil { + errs = append(errs, fmt.Errorf("failed to get image registry additional trusted CA configmap %s: %w", additionalTrustedCAName, err)) + } else { + dst := manifests.ImageRegistryAdditionalTrustedCAConfigMap(additionalTrustedCAName) + if _, err := r.CreateOrUpdate(ctx, r.client, dst, func() error { + dst.Data = src.Data + return nil + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile image registry additional trusted CA configmap %s: %w", additionalTrustedCAName, err)) + } + } + } log.Info("reconciling ingress controller") if err := r.reconcileIngressController(ctx, hcp); err != nil { errs = append(errs, fmt.Errorf("failed to reconcile ingress controller: %w", err)) } + log.Info("reconciling oauth client secrets") + if err 
:= r.reconcileAuthOIDC(ctx, hcp); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile oauth client secrets: %w", err)) + } + log.Info("reconciling kube control plane signer secret") kubeControlPlaneSignerSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -637,6 +675,14 @@ func (r *reconciler) reconcileConfig(ctx context.Context, hcp *hyperv1.HostedCon errs = append(errs, fmt.Errorf("failed to reconcile dns config: %w", err)) } + image := globalconfig.ImageConfig() + if _, err := r.CreateOrUpdate(ctx, r.client, image, func() error { + globalconfig.ReconcileImageConfig(image, hcp) + return nil + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile image config: %w", err)) + } + ingress := globalconfig.IngressConfig() if _, err := r.CreateOrUpdate(ctx, r.client, ingress, func() error { globalconfig.ReconcileIngressConfig(ingress, hcp) @@ -683,11 +729,12 @@ func (r *reconciler) reconcileConfig(ctx context.Context, hcp *hyperv1.HostedCon errs = append(errs, fmt.Errorf("failed to reconcile dns config: %w", err)) } - cloudCredsConfig := globalconfig.CloudCredentialsConfiguration() - if _, err := r.CreateOrUpdate(ctx, r.client, cloudCredsConfig, func() error { - return globalconfig.ReconcileCloudCredentialsConfiguration(cloudCredsConfig) + cloudCredentialConfig := manifests.CloudCredential() + if _, err := r.CreateOrUpdate(ctx, r.client, cloudCredentialConfig, func() error { + cco.ReconcileCloudCredentialConfig(cloudCredentialConfig) + return nil }); err != nil { - errs = append(errs, fmt.Errorf("failed to reconcile cloud credentials config: %w", err)) + errs = append(errs, fmt.Errorf("failed to reconcile cloud credential config: %w", err)) } authenticationConfig := globalconfig.AuthenticationConfiguration() @@ -697,6 +744,13 @@ func (r *reconciler) reconcileConfig(ctx context.Context, hcp *hyperv1.HostedCon errs = append(errs, fmt.Errorf("failed to reconcile authentication config: %w", err)) } + apiServerConfig := 
globalconfig.APIServerConfiguration() + if _, err := r.CreateOrUpdate(ctx, r.client, apiServerConfig, func() error { + return globalconfig.ReconcileAPIServerConfiguration(apiServerConfig, hcp.Spec.Configuration) + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile apiserver config: %w", err)) + } + return errors.NewAggregate(errs) } @@ -859,7 +913,7 @@ func (r *reconciler) reconcileIngressController(ctx context.Context, hcp *hyperv p := ingress.NewIngressParams(hcp) ingressController := manifests.IngressDefaultIngressController() if _, err := r.CreateOrUpdate(ctx, r.client, ingressController, func() error { - return ingress.ReconcileDefaultIngressController(ingressController, p.IngressSubdomain, p.PlatformType, p.Replicas, p.IBMCloudUPI, p.IsPrivate, p.AWSNLB) + return ingress.ReconcileDefaultIngressController(ingressController, p.IngressSubdomain, p.PlatformType, p.Replicas, p.IBMCloudUPI, p.IsPrivate, p.AWSNLB, p.LoadBalancerScope) }); err != nil { errs = append(errs, fmt.Errorf("failed to reconcile default ingress controller: %w", err)) } @@ -936,6 +990,70 @@ func (r *reconciler) reconcileIngressController(ctx context.Context, hcp *hyperv return errors.NewAggregate(errs) } +func (r *reconciler) reconcileAuthOIDC(ctx context.Context, hcp *hyperv1.HostedControlPlane) error { + var errs []error + if !util.HCPOAuthEnabled(hcp) && + len(hcp.Spec.Configuration.Authentication.OIDCProviders) != 0 { + + // Copy issuer CA configmap into openshift-config namespace + provider := hcp.Spec.Configuration.Authentication.OIDCProviders[0] + if provider.Issuer.CertificateAuthority.Name != "" { + name := provider.Issuer.CertificateAuthority.Name + var src corev1.ConfigMap + err := r.cpClient.Get(ctx, client.ObjectKey{Namespace: hcp.Namespace, Name: name}, &src) + if err != nil { + errs = append(errs, fmt.Errorf("failed to get issuer CA configmap %s: %w", name, err)) + } else { + dest := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + 
Namespace: "openshift-config", + }, + } + _, err = r.CreateOrUpdate(ctx, r.client, &dest, func() error { + if dest.Data == nil { + dest.Data = map[string]string{} + } + dest.Data["ca-bundle.crt"] = src.Data["ca-bundle.crt"] + return nil + }) + if err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile issuer CA configmap %s: %w", dest.Name, err)) + } + } + } + + // Copy OIDCClient Secrets into openshift-config namespace + if len(hcp.Spec.Configuration.Authentication.OIDCProviders[0].OIDCClients) > 0 { + for _, oidcClient := range hcp.Spec.Configuration.Authentication.OIDCProviders[0].OIDCClients { + var src corev1.Secret + err := r.cpClient.Get(ctx, client.ObjectKey{Namespace: hcp.Namespace, Name: oidcClient.ClientSecret.Name}, &src) + if err != nil { + errs = append(errs, fmt.Errorf("failed to get OIDCClient secret %s: %w", oidcClient.ClientSecret.Name, err)) + continue + } + dest := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: oidcClient.ClientSecret.Name, + Namespace: "openshift-config", + }, + } + _, err = r.CreateOrUpdate(ctx, r.client, &dest, func() error { + if dest.Data == nil { + dest.Data = map[string][]byte{} + } + dest.Data["clientSecret"] = src.Data["clientSecret"] + return nil + }) + if err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile OIDCClient secret %s: %w", dest.Name, err)) + } + } + } + } + return errors.NewAggregate(errs) +} + func (r *reconciler) reconcileKonnectivityAgent(ctx context.Context, hcp *hyperv1.HostedControlPlane, releaseImage *releaseinfo.ReleaseImage) error { var errs []error @@ -990,7 +1108,7 @@ func (r *reconciler) reconcileKonnectivityAgent(ctx context.Context, hcp *hyperv agentDaemonset := manifests.KonnectivityAgentDaemonSet() if _, err := r.CreateOrUpdate(ctx, r.client, agentDaemonset, func() error { - konnectivity.ReconcileAgentDaemonSet(agentDaemonset, p.DeploymentConfig, p.Image, p.ExternalAddress, p.ExternalPort, hcp.Spec.Platform.Type, proxy.Status) + 
konnectivity.ReconcileAgentDaemonSet(agentDaemonset, p.DeploymentConfig, p.Image, p.ExternalAddress, p.ExternalPort, hcp.Spec.Platform, proxy.Status) return nil }); err != nil { errs = append(errs, fmt.Errorf("failed to reconcile konnectivity agent daemonset: %w", err)) } @@ -1051,6 +1169,38 @@ func (r *reconciler) reconcileOpenshiftOAuthAPIServerAPIServices(ctx context.Con return errors.NewAggregate(errs) } +func (r *reconciler) reconcileKASEndpoints(ctx context.Context, hcp *hyperv1.HostedControlPlane) error { + var errs []error + + kasAdvertiseAddress := util.GetAdvertiseAddress(hcp, config.DefaultAdvertiseIPv4Address, config.DefaultAdvertiseIPv6Address) + kasEndpointsPort := util.KASPodPort(hcp) + + // We only keep reconciling the endpoint for existing clusters that are relying on this for nodes haproxy to work. + // Otherwise, changing the haproxy config to !=443 would result in a NodePool rollout which we want to avoid for existing clusters. + // Existing clusters are given the *hcp.Spec.Networking.APIServer.Port == 443 semantic as we were enforcing this default previously, + // and it's now a forbidden operation. 
+ if hcp.Spec.Networking.APIServer != nil && hcp.Spec.Networking.APIServer.Port != nil && + *hcp.Spec.Networking.APIServer.Port == 443 { + kasEndpointsPort = 443 + } + + kasEndpoints := manifests.KASEndpoints() + if _, err := r.CreateOrUpdate(ctx, r.client, kasEndpoints, func() error { + kas.ReconcileKASEndpoints(kasEndpoints, kasAdvertiseAddress, kasEndpointsPort) + return nil + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile kubernetes.default endpoints: %w", err)) + } + kasEndpointSlice := manifests.KASEndpointSlice() + if _, err := r.CreateOrUpdate(ctx, r.client, kasEndpointSlice, func() error { + kas.ReconcileKASEndpointSlice(kasEndpointSlice, kasAdvertiseAddress, kasEndpointsPort) + return nil + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile kubernetes.default endpoint slice: %w", err)) + } + return errors.NewAggregate(errs) +} + func (r *reconciler) reconcileOpenshiftAPIServerEndpoints(ctx context.Context, hcp *hyperv1.HostedControlPlane) error { cpService := manifests.OpenShiftAPIServerService(hcp.Namespace) if err := r.cpClient.Get(ctx, client.ObjectKeyFromObject(cpService), cpService); err != nil { @@ -1173,6 +1323,7 @@ func (r *reconciler) reconcileUserCertCABundle(ctx context.Context, hcp *hyperv1 const awsCredentialsTemplate = `[default] role_arn = %s web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token +sts_regional_endpoints = regional ` func (r *reconciler) reconcileCloudCredentialSecrets(ctx context.Context, hcp *hyperv1.HostedControlPlane, log logr.Logger) []error { @@ -1338,17 +1489,53 @@ func (r *reconciler) reconcileCloudCredentialSecrets(ctx context.Context, hcp *h return errs } +// reconcileOperatorHub gets the OperatorHubConfig from the HCP, for now the controller only reconcile over the DisableAllDefaultSources field and only once. +// After that the HCCO checks the OperatorHub object in the HC to manage the OLM resources. 
+// TODO (jparrill): Include in the reconciliation the OperatorHub.Sources to disable only the selected sources. +func (r *reconciler) reconcileOperatorHub(ctx context.Context, operatorHub *configv1.OperatorHub, hcp *hyperv1.HostedControlPlane) []error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling HCP OperatorHub config") + if operatorHub.ResourceVersion == "" { + if hcp.Spec.Configuration != nil && hcp.Spec.Configuration.OperatorHub != nil { + operatorHub.Spec.DisableAllDefaultSources = hcp.Spec.Configuration.OperatorHub.DisableAllDefaultSources + } + } + + return nil +} + func (r *reconciler) reconcileOLM(ctx context.Context, hcp *hyperv1.HostedControlPlane) []error { var errs []error + operatorHub := manifests.OperatorHub() + + if hcp.Spec.OLMCatalogPlacement == hyperv1.ManagementOLMCatalogPlacement { + // Management OLM Placement + if _, err := r.CreateOrUpdate(ctx, r.client, operatorHub, func() error { + if hcp.Spec.Configuration != nil && hcp.Spec.Configuration.OperatorHub != nil { + // if spec.Configuration.OperatorHub is set, we need to sync it to the guest cluster + operatorHub.Spec.DisableAllDefaultSources = hcp.Spec.Configuration.OperatorHub.DisableAllDefaultSources + } else { + // If the spec.Configuration is nil or the spec.Configuration.OperatorHub is nil, then we need to set the OperatorHub.Spec to an empty struct + operatorHub.Spec = configv1.OperatorHubSpec{} + } + return nil + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile OperatorHub configuration: %w", err)) + } + } else { + // Guest OLM Placement + if _, err := r.CreateOrUpdate(ctx, r.client, operatorHub, func() error { + r.reconcileOperatorHub(ctx, operatorHub, hcp) + return nil + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile OperatorHub configuration: %w", err)) + } + } + p := olm.NewOperatorLifecycleManagerParams(hcp) // Check if the defaultSources are disabled - operatorHub := &configv1.OperatorHub{ - ObjectMeta: metav1.ObjectMeta{ 
- Name: "cluster", - }, - } if err := r.client.Get(ctx, client.ObjectKeyFromObject(operatorHub), operatorHub); err != nil { if !apierrors.IsNotFound(err) { errs = append(errs, fmt.Errorf("failed to get OperatorHub %s: %w", client.ObjectKeyFromObject(operatorHub).String(), err)) @@ -1368,7 +1555,7 @@ func (r *reconciler) reconcileOLM(ctx context.Context, hcp *hyperv1.HostedContro for _, catalog := range catalogs { cs := catalog.manifest() if operatorHub.Spec.DisableAllDefaultSources { - if err := r.client.Delete(ctx, cs); err != nil { + if _, err := util.DeleteIfNeeded(ctx, r.client, cs); err != nil { if !apierrors.IsNotFound(err) { errs = append(errs, fmt.Errorf("failed to delete catalogSource %s/%s: %w", cs.Namespace, cs.Name, err)) } @@ -1431,10 +1618,6 @@ func (r *reconciler) reconcileObservedConfiguration(ctx context.Context, hcp *hy source client.Object observedCM *corev1.ConfigMap }{ - { - source: globalconfig.ImageConfig(), - observedCM: globalconfig.ObservedImageConfig(hcp.Namespace), - }, { source: globalconfig.BuildConfig(), observedCM: globalconfig.ObservedBuildConfig(hcp.Namespace), @@ -1512,6 +1695,12 @@ func (r *reconciler) reconcileGuestClusterAlertRules(ctx context.Context) error }); err != nil { errs = append(errs, fmt.Errorf("failed to reconcile guest cluster api usage rule: %w", err)) } + podSecurityViolationRule := manifests.PodSecurityViolationRule() + if _, err := r.CreateOrUpdate(ctx, r.client, podSecurityViolationRule, func() error { + return alerts.ReconcilePodSecurityViolationRule(podSecurityViolationRule) + }); err != nil { + errs = append(errs, fmt.Errorf("failed to reconcile guest cluster pod security violation rule: %w", err)) + } return errors.NewAggregate(errs) } diff --git a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources_test.go b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources_test.go index 9a17f9ef5f..24a376db52 100644 --- 
a/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources_test.go +++ b/control-plane-operator/hostedclusterconfigoperator/controllers/resources/resources_test.go @@ -9,10 +9,12 @@ import ( . "github.com/onsi/gomega" appsv1 "k8s.io/api/apps/v1" + discoveryv1 "k8s.io/api/discovery/v1" rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" @@ -161,6 +163,116 @@ func TestReconcileErrorHandling(t *testing.T) { } } +func TestReconcileOLM(t *testing.T) { + var errs []error + hcp := fakeHCP() + hcp.Namespace = "openshift-operator-lifecycle-manager" + fakeCPService := manifests.OLMPackageServerControlPlaneService(hcp.Namespace) + fakeCPService.Spec.ClusterIP = "172.30.108.248" + rootCA := cpomanifests.RootCASecret(hcp.Namespace) + ctx := context.Background() + + testCases := []struct { + name string + hcpClusterConfig *hyperv1.ClusterConfiguration + olmCatalogPlacement hyperv1.OLMCatalogPlacement + want *configv1.OperatorHubSpec + }{ + { + name: "PlacementStrategy is management and no configuration provided", + hcpClusterConfig: nil, + olmCatalogPlacement: hyperv1.ManagementOLMCatalogPlacement, + want: &configv1.OperatorHubSpec{}, + }, + { + name: "PlacementStrategy is management and allDefaultSources disabled", + hcpClusterConfig: &hyperv1.ClusterConfiguration{ + OperatorHub: &configv1.OperatorHubSpec{ + DisableAllDefaultSources: true, + }, + }, + olmCatalogPlacement: hyperv1.ManagementOLMCatalogPlacement, + want: &configv1.OperatorHubSpec{ + DisableAllDefaultSources: true, + }, + }, + { + name: "PlacementStrategy is management and allDefaultSources enabled", + hcpClusterConfig: &hyperv1.ClusterConfiguration{ + OperatorHub: &configv1.OperatorHubSpec{ + DisableAllDefaultSources: false, + }, + }, + olmCatalogPlacement: 
hyperv1.ManagementOLMCatalogPlacement, + want: &configv1.OperatorHubSpec{ + DisableAllDefaultSources: false, + }, + }, + { + name: "PlacementStrategy is guest and no configuration provided", + hcpClusterConfig: nil, + olmCatalogPlacement: hyperv1.GuestOLMCatalogPlacement, + want: &configv1.OperatorHubSpec{}, + }, + { + // We expect here the OperatorHub in guest to keep the already set value and + // don't overwrite the value with the new one. + name: "PlacementStrategy is guest and allDefaultSources disabled, the first reconciliation loop already happened", + hcpClusterConfig: &hyperv1.ClusterConfiguration{ + OperatorHub: &configv1.OperatorHubSpec{ + DisableAllDefaultSources: true, + }, + }, + olmCatalogPlacement: hyperv1.GuestOLMCatalogPlacement, + want: &configv1.OperatorHubSpec{ + DisableAllDefaultSources: false, + }, + }, + { + name: "PlacementStrategy is guest and allDefaultSources enabled", + hcpClusterConfig: &hyperv1.ClusterConfiguration{ + OperatorHub: &configv1.OperatorHubSpec{ + DisableAllDefaultSources: false, + }, + }, + olmCatalogPlacement: hyperv1.GuestOLMCatalogPlacement, + want: &configv1.OperatorHubSpec{ + DisableAllDefaultSources: false, + }, + }, + } + + cpClient := fake.NewClientBuilder(). + WithScheme(api.Scheme). + WithObjects(rootCA, fakeCPService, hcp). + Build() + hcCLient := fake.NewClientBuilder(). + WithScheme(api.Scheme). + WithObjects(rootCA). + Build() + + r := &reconciler{ + client: hcCLient, + cpClient: cpClient, + CreateOrUpdateProvider: &simpleCreateOrUpdater{}, + rootCA: "fake", + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + errs = append(errs, r.reconcileOLM(ctx, hcp)...) + hcp.Spec.Configuration = tc.hcpClusterConfig + hcp.Spec.OLMCatalogPlacement = tc.olmCatalogPlacement + errs = append(errs, r.reconcileOLM(ctx, hcp)...) 
+ g.Expect(errs).To(BeEmpty(), "unexpected errors") + hcOpHub := manifests.OperatorHub() + err := r.client.Get(ctx, client.ObjectKeyFromObject(hcOpHub), hcOpHub) + g.Expect(err).To(BeNil(), "error checking HC OperatorHub") + g.Expect(hcOpHub.Spec).To(Equal(*tc.want)) + }) + } +} + type simpleCreateOrUpdater struct{} func (*simpleCreateOrUpdater) CreateOrUpdate(ctx context.Context, c client.Client, obj client.Object, f controllerutil.MutateFn) (controllerutil.OperationResult, error) { @@ -255,6 +367,36 @@ func fakeOperatorHub() *configv1.OperatorHub { } } +func withICS(hcp *hyperv1.HostedControlPlane) *hyperv1.HostedControlPlane { + hcpOriginal := hcp.DeepCopy() + hcpOriginal.Spec.ImageContentSources = []hyperv1.ImageContentSource{ + { + Source: "example.com/test", + Mirrors: []string{ + // the number after test is in purpose to not fit to the source namespace name + "mirror1.example.com/test1", + "mirror2.example.com/test2", + }, + }, + { + Source: "sample.com/test", + Mirrors: []string{ + "mirror1.sample.com/test1", + "mirror2.sample.com/test2", + }, + }, + { + Source: "quay.io/test", + Mirrors: []string{ + "mirror1.quay.io/test1", + "mirror2.quay.io/test2", + }, + }, + } + + return hcpOriginal +} + func TestReconcileKubeadminPasswordHashSecret(t *testing.T) { testNamespace := "master-cluster1" testHCPName := "cluster1" @@ -809,3 +951,130 @@ func TestReconcileClusterVersion(t *testing.T) { g.Expect(clusterVersion.Spec.Overrides).To(Equal(testOverrides)) g.Expect(clusterVersion.Spec.Channel).To(BeEmpty()) } + +func TestReconcileKASEndpoints(t *testing.T) { + + testCases := []struct { + name string + hcp *hyperv1.HostedControlPlane + expectedPort int32 + }{ + { + name: "When HC has hcp.spec.networking.apiServer.port set to 443, endpoint and slice should have port 443", + hcp: &hyperv1.HostedControlPlane{ + Spec: hyperv1.HostedControlPlaneSpec{ + Networking: hyperv1.ClusterNetworking{ + APIServer: &hyperv1.APIServerNetworking{ + Port: ptr.To(int32(443)), + }, + }, + 
}, + }, + expectedPort: int32(443), + }, + { + name: "When HC has no hcp.spec.networking.apiServer.port set, endpoint and slice should have port 6443", + hcp: &hyperv1.HostedControlPlane{ + Spec: hyperv1.HostedControlPlaneSpec{}, + }, + expectedPort: int32(6443), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithScheme(api.Scheme).Build() + r := &reconciler{ + client: fakeClient, + CreateOrUpdateProvider: &simpleCreateOrUpdater{}, + } + + err := r.reconcileKASEndpoints(context.Background(), tc.hcp) + g.Expect(err).ToNot(HaveOccurred()) + + endpoints := &corev1.Endpoints{} + err = fakeClient.Get(context.Background(), client.ObjectKey{Name: "kubernetes", Namespace: corev1.NamespaceDefault}, endpoints) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(endpoints.Subsets[0].Ports[0].Name).To(Equal("https")) + g.Expect(endpoints.Subsets[0].Ports[0].Port).To(Equal(int32(tc.expectedPort))) + + endpointSlice := &discoveryv1.EndpointSlice{} + err = fakeClient.Get(context.Background(), client.ObjectKey{Name: "kubernetes", Namespace: corev1.NamespaceDefault}, endpointSlice) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(endpoints.Subsets[0].Ports[0].Name).To(Equal("https")) + g.Expect(endpoints.Subsets[0].Ports[0].Port).To(Equal(int32(tc.expectedPort))) + }) + } +} + +func TestReconcileImageContentPolicyType(t *testing.T) { + testCases := []struct { + name string + hcp *hyperv1.HostedControlPlane + removeICSAndReconcile bool + }{ + { + name: "ICS with content, it should return an IDMS with the same content", + hcp: withICS(fakeHCP()), + }, + { + name: "ICS empty, is should return an empty IDMS", + hcp: fakeHCP(), + }, + { + name: "ICS And IDMS should be in sync always", + hcp: withICS(fakeHCP()), + removeICSAndReconcile: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := 
fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(tc.hcp).Build() + r := &reconciler{ + client: fakeClient, + CreateOrUpdateProvider: &simpleCreateOrUpdater{}, + } + err := r.reconcileImageContentPolicyType(context.Background(), tc.hcp) + g.Expect(err).ToNot(HaveOccurred()) + + idms := globalconfig.ImageDigestMirrorSet() + err = fakeClient.Get(context.Background(), client.ObjectKeyFromObject(idms), idms) + g.Expect(err).ToNot(HaveOccurred(), "error getting IDMS") + + // Same number of ICS and IDMS + g.Expect(len(tc.hcp.Spec.ImageContentSources)).To(Equal(len(idms.Spec.ImageDigestMirrors)), "expecting equal values between IDMS and ICS") + + if tc.hcp.Spec.ImageContentSources != nil { + // Check if the ICS and IDMS have the same values + compareICSAndIDMS(g, tc.hcp.Spec.ImageContentSources, idms) + } + + if tc.removeICSAndReconcile { + // Simulating a user updating the HCP and removing the ICS + origHCP := tc.hcp.DeepCopy() + origHCP.Spec.ImageContentSources = nil + + err = r.reconcileImageContentPolicyType(context.Background(), origHCP) + g.Expect(err).ToNot(HaveOccurred()) + idms := globalconfig.ImageDigestMirrorSet() + err = fakeClient.Get(context.Background(), client.ObjectKeyFromObject(idms), idms) + g.Expect(err).ToNot(HaveOccurred(), "error getting IDMS") + g.Expect(len(origHCP.Spec.ImageContentSources)).To(Equal(len(idms.Spec.ImageDigestMirrors)), "expecting equal values between IDMS and ICS") + compareICSAndIDMS(g, origHCP.Spec.ImageContentSources, idms) + } + }) + } +} + +func compareICSAndIDMS(g *WithT, ics []hyperv1.ImageContentSource, idms *configv1.ImageDigestMirrorSet) { + g.Expect(len(ics)).To(Equal(len(idms.Spec.ImageDigestMirrors)), "expecting equal values between IDMS and ICS") + // Check if the ICS and IDMS have the same values + for i, ics := range ics { + g.Expect(ics.Source).To(Equal(idms.Spec.ImageDigestMirrors[i].Source)) + for j, mirrorics := range ics.Mirrors { + 
g.Expect(mirrorics).To(Equal(string(idms.Spec.ImageDigestMirrors[i].Mirrors[j]))) + } + } +} diff --git a/control-plane-operator/hostedclusterconfigoperator/operator/config.go b/control-plane-operator/hostedclusterconfigoperator/operator/config.go index e302b400a0..471128a945 100644 --- a/control-plane-operator/hostedclusterconfigoperator/operator/config.go +++ b/control-plane-operator/hostedclusterconfigoperator/operator/config.go @@ -127,6 +127,9 @@ func Mgr(cfg, cpConfig *rest.Config, namespace string) ctrl.Manager { &operatorv1.CloudCredential{}: allSelector, &admissionregistrationv1.ValidatingWebhookConfiguration{}: allSelector, &admissionregistrationv1.MutatingWebhookConfiguration{}: allSelector, + &operatorv1.Storage{}: allSelector, + &operatorv1.CSISnapshotController{}: allSelector, + &operatorv1.ClusterCSIDriver{}: allSelector, // Needed for inplace upgrader. &corev1.Node{}: allSelector, diff --git a/control-plane-operator/main.go b/control-plane-operator/main.go index a9cd2b677a..cac3c63288 100644 --- a/control-plane-operator/main.go +++ b/control-plane-operator/main.go @@ -13,7 +13,9 @@ import ( "github.com/openshift/hypershift/control-plane-operator/hostedclusterconfigoperator" pkiconfig "github.com/openshift/hypershift/control-plane-pki-operator/config" "github.com/openshift/hypershift/dnsresolver" + etcddefrag "github.com/openshift/hypershift/etcd-defrag" ignitionserver "github.com/openshift/hypershift/ignition-server/cmd" + konnectivityhttpsproxy "github.com/openshift/hypershift/konnectivity-https-proxy" konnectivitysocks5proxy "github.com/openshift/hypershift/konnectivity-socks5-proxy" kubernetesdefaultproxy "github.com/openshift/hypershift/kubernetes-default-proxy" "github.com/openshift/hypershift/pkg/version" @@ -46,8 +48,6 @@ import ( "github.com/openshift/hypershift/support/releaseinfo" "github.com/openshift/hypershift/support/upsert" - operatorv1 "github.com/openshift/api/operator/v1" - ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/log/zap" // +kubebuilder:scaffold:imports @@ -80,10 +80,14 @@ func commandFor(name string) *cobra.Command { cmd = ignitionserver.NewStartCommand() case "konnectivity-socks5-proxy": cmd = konnectivitysocks5proxy.NewStartCommand() + case "konnectivity-https-proxy": + cmd = konnectivityhttpsproxy.NewStartCommand() case "availability-prober": cmd = availabilityprober.NewStartCommand() case "token-minter": cmd = tokenminter.NewStartCommand() + case "etcd-defrag-controller": + cmd = etcddefrag.NewStartCommand() default: // for the default case, there is no need // to convert flags, return immediately @@ -126,9 +130,11 @@ func defaultCommand() *cobra.Command { cmd.AddCommand(NewStartCommand()) cmd.AddCommand(hostedclusterconfigoperator.NewCommand()) cmd.AddCommand(konnectivitysocks5proxy.NewStartCommand()) + cmd.AddCommand(konnectivityhttpsproxy.NewStartCommand()) cmd.AddCommand(availabilityprober.NewStartCommand()) cmd.AddCommand(tokenminter.NewStartCommand()) cmd.AddCommand(ignitionserver.NewStartCommand()) + cmd.AddCommand(etcddefrag.NewStartCommand()) cmd.AddCommand(kubernetesdefaultproxy.NewStartCommand()) cmd.AddCommand(dnsresolver.NewCommand()) @@ -155,6 +161,7 @@ func NewStartCommand() *cobra.Command { deploymentName string metricsAddr string healthProbeAddr string + cpoImage string hostedClusterConfigOperatorImage string socks5ProxyImage string availabilityProberImage string @@ -212,9 +219,6 @@ func NewStartCommand() *cobra.Command { HealthProbeBindAddress: healthProbeAddr, Cache: cache.Options{ DefaultFieldSelector: fields.OneTermEqualSelector("metadata.namespace", namespace), - ByObject: map[crclient.Object]cache.ByObject{ - &operatorv1.IngressController{}: {Field: fields.OneTermEqualSelector("metadata.namespace", manifests.IngressPrivateIngressController("").Namespace)}, - }, }, }) if err != nil { @@ -241,6 +245,23 @@ func NewStartCommand() *cobra.Command { os.Exit(1) } + // The HyperShift operator is generally able to 
specify with precision the images + // that we need to use here. In order to be backwards-compatible, though, we need + // to do so with environment variables. While it's possible that a more vigorous + // refactor could remove the following self-referential image lookup code entirely, + // for now we remove it in practice by using the environment variables, when set. + for env, into := range map[string]*string{ + "CONTROL_PLANE_OPERATOR_IMAGE": &cpoImage, + "HOSTED_CLUSTER_CONFIG_OPERATOR_IMAGE": &hostedClusterConfigOperatorImage, + "SOCKS5_PROXY_IMAGE": &socks5ProxyImage, + "AVAILABILITY_PROBER_IMAGE": &availabilityProberImage, + "TOKEN_MINTER_IMAGE": &tokenMinterImage, + } { + if value := os.Getenv(env); value != "" { + *into = value + } + } + // For now, since the hosted cluster config operator is treated like any other // release payload component but isn't actually part of a release payload, // enable the user to specify an image directly as a flag, and otherwise @@ -326,7 +347,7 @@ func NewStartCommand() *cobra.Command { } setupLog.Info("using token minter image", "image", tokenMinterImage) - cpoImage, err := lookupOperatorImage("") + cpoImage, err = lookupOperatorImage(cpoImage) if err != nil { setupLog.Error(err, "failed to find controlplane-operator-image") os.Exit(1) @@ -360,6 +381,8 @@ func NewStartCommand() *cobra.Command { "token-minter": tokenMinterImage, "aws-kms-provider": awsKMSProviderImage, util.CPOImageName: cpoImage, + util.CPPKIOImageName: cpoImage, + "cluster-version-operator": os.Getenv("OPERATE_ON_RELEASE_IMAGE"), } for name, image := range imageOverrides { componentImages[name] = image @@ -372,15 +395,25 @@ func NewStartCommand() *cobra.Command { imageRegistryOverrides = util.ConvertImageRegistryOverrideStringToMap(openShiftImgOverrides) } - releaseProvider := &releaseinfo.ProviderWithOpenShiftImageRegistryOverridesDecorator{ + coreReleaseProvider := &releaseinfo.StaticProviderDecorator{ + Delegate: &releaseinfo.CachedProvider{ + Inner: 
&releaseinfo.RegistryClientProvider{}, + Cache: map[string]*releaseinfo.ReleaseImage{}, + }, + ComponentImages: componentImages, + } + + userReleaseProvider := &releaseinfo.ProviderWithOpenShiftImageRegistryOverridesDecorator{ + Delegate: &releaseinfo.RegistryMirrorProviderDecorator{ + Delegate: coreReleaseProvider, + RegistryOverrides: nil, // UserReleaseProvider shouldn't include registry overrides as they should not get propagated to the data plane. + }, + OpenShiftImageRegistryOverrides: imageRegistryOverrides, + } + + cpReleaseProvider := &releaseinfo.ProviderWithOpenShiftImageRegistryOverridesDecorator{ Delegate: &releaseinfo.RegistryMirrorProviderDecorator{ - Delegate: &releaseinfo.StaticProviderDecorator{ - Delegate: &releaseinfo.CachedProvider{ - Inner: &releaseinfo.RegistryClientProvider{}, - Cache: map[string]*releaseinfo.ReleaseImage{}, - }, - ComponentImages: componentImages, - }, + Delegate: coreReleaseProvider, RegistryOverrides: registryOverrides, }, OpenShiftImageRegistryOverrides: imageRegistryOverrides, @@ -400,7 +433,8 @@ func NewStartCommand() *cobra.Command { if err := (&hostedcontrolplane.HostedControlPlaneReconciler{ Client: mgr.GetClient(), ManagementClusterCapabilities: mgmtClusterCaps, - ReleaseProvider: releaseProvider, + ReleaseProvider: cpReleaseProvider, + UserReleaseProvider: userReleaseProvider, EnableCIDebugOutput: enableCIDebugOutput, OperateOnReleaseImage: os.Getenv("OPERATE_ON_RELEASE_IMAGE"), DefaultIngressDomain: defaultIngressDomain, diff --git a/control-plane-pki-operator/OWNERS b/control-plane-pki-operator/OWNERS new file mode 100644 index 0000000000..45efd63037 --- /dev/null +++ b/control-plane-pki-operator/OWNERS @@ -0,0 +1,2 @@ +labels: +- area/control-plane-pki-operator \ No newline at end of file diff --git a/control-plane-pki-operator/certificaterevocationcontroller/certificaterevocationcontroller.go b/control-plane-pki-operator/certificaterevocationcontroller/certificaterevocationcontroller.go new file mode 100644 
index 0000000000..99d6bf7ecd --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/certificaterevocationcontroller.go @@ -0,0 +1,1058 @@ +package certificaterevocationcontroller + +import ( + "context" + "crypto/sha256" + "crypto/x509" + "errors" + "fmt" + "math/big" + "sort" + "strconv" + "strings" + "time" + + certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + certificatesv1alpha1applyconfigurations "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1" + hypershiftclient "github.com/openshift/hypershift/client/clientset/clientset" + hypershiftinformers "github.com/openshift/hypershift/client/informers/externalversions" + hcpmanifests "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" + "github.com/openshift/hypershift/control-plane-pki-operator/manifests" + "github.com/openshift/library-go/pkg/certs/cert-inspection/certgraphanalysis" + "github.com/openshift/library-go/pkg/certs/cert-inspection/certgraphapi" + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/certrotation" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" + authenticationv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + corev1applyconfigurations "k8s.io/client-go/applyconfigurations/core/v1" + metav1applyconfigurations "k8s.io/client-go/applyconfigurations/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + 
"k8s.io/client-go/tools/clientcmd" + certutil "k8s.io/client-go/util/cert" + "k8s.io/klog/v2" +) + +type CertificateRevocationController struct { + kubeClient kubernetes.Interface + hypershiftClient hypershiftclient.Interface + + fieldManager string + getCRR func(namespace, name string) (*certificatesv1alpha1.CertificateRevocationRequest, error) + getSecret func(namespace, name string) (*corev1.Secret, error) + listSecrets func(namespace string) ([]*corev1.Secret, error) + getConfigMap func(namespace, name string) (*corev1.ConfigMap, error) + + // for unit testing only + skipKASConnections bool +} + +// TODO: we need some sort of time-based GC for completed CRRs + +func NewCertificateRevocationController( + hostedControlPlane *hypershiftv1beta1.HostedControlPlane, + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, + hypershiftInformers hypershiftinformers.SharedInformerFactory, + kubeClient kubernetes.Interface, + hypershiftClient hypershiftclient.Interface, + eventRecorder events.Recorder, +) factory.Controller { + c := &CertificateRevocationController{ + fieldManager: "certificate-revocation-controller", + kubeClient: kubeClient, + hypershiftClient: hypershiftClient, + getCRR: func(namespace, name string) (*certificatesv1alpha1.CertificateRevocationRequest, error) { + return hypershiftInformers.Certificates().V1alpha1().CertificateRevocationRequests().Lister().CertificateRevocationRequests(namespace).Get(name) + }, + getSecret: func(namespace, name string) (*corev1.Secret, error) { + return kubeInformersForNamespaces.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace).Get(name) + }, + listSecrets: func(namespace string) ([]*corev1.Secret, error) { + return kubeInformersForNamespaces.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace).List(labels.Everything()) + }, + getConfigMap: func(namespace, name string) (*corev1.ConfigMap, error) { + return 
kubeInformersForNamespaces.InformersFor(namespace).Core().V1().ConfigMaps().Lister().ConfigMaps(namespace).Get(name) + }, + } + + crrInformer := hypershiftInformers.Certificates().V1alpha1().CertificateRevocationRequests().Informer() + secretInformer := kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().Secrets().Informer() + configMapInformer := kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().ConfigMaps().Informer() + listCRRs := func(namespace string) ([]*certificatesv1alpha1.CertificateRevocationRequest, error) { + return hypershiftInformers.Certificates().V1alpha1().CertificateRevocationRequests().Lister().CertificateRevocationRequests(hostedControlPlane.Namespace).List(labels.Everything()) + } + + return factory.New(). + WithInformersQueueKeysFunc(enqueueCertificateRevocationRequest, crrInformer). + WithInformersQueueKeysFunc(enqueueSecret(listCRRs), secretInformer). + WithInformersQueueKeysFunc(enqueueConfigMap(listCRRs), configMapInformer). + WithSync(c.syncCertificateRevocationRequest). + ResyncEvery(time.Minute). 
+ ToController("CertificateRevocationController", eventRecorder.WithComponentSuffix(c.fieldManager)) +} + +func enqueueCertificateRevocationRequest(obj runtime.Object) []string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + return []string{key} +} + +func enqueueSecret(listCRRs func(namespace string) ([]*certificatesv1alpha1.CertificateRevocationRequest, error)) func(obj runtime.Object) []string { + return func(obj runtime.Object) []string { + secret, ok := obj.(*corev1.Secret) + if !ok { + klog.ErrorS(fmt.Errorf("unexpected object of type %T, wanted %T", obj, &corev1.Secret{}), "could not determine queue key") + return nil + } + // if this is a copied signer, queue the CRR that copied it + for _, owner := range secret.ObjectMeta.OwnerReferences { + if owner.Kind == "CertificateRevocationRequest" { + key, err := cache.MetaNamespaceKeyFunc(&metav1.ObjectMeta{ + Namespace: secret.Namespace, + Name: owner.Name, + }) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + return []string{key} + } + } + // if this is a leaf certificate, requeue any CRRs revoking the issuer + if signer, ok := signerClassForLeafCertificateSecret(secret); ok { + return enqueueForSigner(secret.Namespace, signer, listCRRs) + } + + // if this is a signer, requeue any CRRs revoking it + if signer, ok := signerClassForSecret(secret); ok { + return enqueueForSigner(secret.Namespace, signer, listCRRs) + } + return nil + } +} + +func enqueueForSigner(namespace string, signer certificates.SignerClass, listCRRs func(namespace string) ([]*certificatesv1alpha1.CertificateRevocationRequest, error)) []string { + crrs, err := listCRRs(namespace) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + var keys []string + for _, crr := range crrs { + if crr.Spec.SignerClass == string(signer) { + key, err := 
cache.DeletionHandlingMetaNamespaceKeyFunc(crr) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + keys = append(keys, key) + } + } + return keys +} + +func enqueueAll(namespace string, listCRRs func(namespace string) ([]*certificatesv1alpha1.CertificateRevocationRequest, error)) []string { + crrs, err := listCRRs(namespace) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + var keys []string + for _, crr := range crrs { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(crr) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + keys = append(keys, key) + } + return keys +} + +// TODO: we *could* store the signer -> signer class, trust bundle -> certificate, and leaf -> trunk associations +// on the objects and use lookups or indices for this. + +// signerClassForSecret determines the signer classes that the secret contains data for. +// We could use this transformation to create an index, but we expect the scale of resource +// counts for this controller to be very small (maybe O(10)) and the rate of change to be +// low, so the extra memory cost in indices is not valuable. 
+func signerClassForSecret(secret *corev1.Secret) (certificates.SignerClass, bool) { + return signerClassForSecretName(secret.Name) +} + +func signerClassForSecretName(name string) (certificates.SignerClass, bool) { + switch name { + case manifests.CustomerSystemAdminSigner("").Name: + return certificates.CustomerBreakGlassSigner, true + case manifests.SRESystemAdminSigner("").Name: + return certificates.SREBreakGlassSigner, true + default: + return "", false + } +} + +func secretForSignerClass(namespace string, signer certificates.SignerClass) (*corev1.Secret, bool) { + switch signer { + case certificates.CustomerBreakGlassSigner: + return manifests.CustomerSystemAdminSigner(namespace), true + case certificates.SREBreakGlassSigner: + return manifests.SRESystemAdminSigner(namespace), true + default: + return nil, false + } +} + +func signerClassForLeafCertificateSecret(secret *corev1.Secret) (certificates.SignerClass, bool) { + switch secret.Name { + case manifests.CustomerSystemAdminClientCertSecret(secret.Namespace).Name: + return certificates.CustomerBreakGlassSigner, true + case manifests.SRESystemAdminClientCertSecret(secret.Namespace).Name: + return certificates.SREBreakGlassSigner, true + default: + return "", false + } +} + +func enqueueConfigMap(listCRRs func(namespace string) ([]*certificatesv1alpha1.CertificateRevocationRequest, error)) func(obj runtime.Object) []string { + return func(obj runtime.Object) []string { + configMap, ok := obj.(*corev1.ConfigMap) + if !ok { + klog.ErrorS(fmt.Errorf("unexpected object of type %T, wanted %T", obj, &corev1.ConfigMap{}), "could not determine queue key") + return nil + } + totalClientCACM := manifests.TotalKASClientCABundle(configMap.Namespace) + if configMap.Name == totalClientCACM.Name { + return enqueueAll(configMap.Namespace, listCRRs) + } + signer, ok := signerClassForConfigMap(configMap) + if !ok { + return nil + } + return enqueueForSigner(configMap.Namespace, signer, listCRRs) + } +} + +// 
signerClassForConfigMap determines the signer classes that the configmap contains data for. +// We could use this transformation to create an index, but we expect the scale of resource +// counts for this controller to be very small (maybe O(10)) and the rate of change to be +// low, so the extra memory cost in indices is not valuable. +func signerClassForConfigMap(configMap *corev1.ConfigMap) (certificates.SignerClass, bool) { + switch configMap.Name { + case manifests.CustomerSystemAdminSignerCA(configMap.Namespace).Name: + return certificates.CustomerBreakGlassSigner, true + case manifests.SRESystemAdminSignerCA(configMap.Namespace).Name: + return certificates.SREBreakGlassSigner, true + default: + return "", false + } +} + +func configMapForSignerClass(namespace string, signer certificates.SignerClass) (*corev1.ConfigMap, bool) { + switch signer { + case certificates.CustomerBreakGlassSigner: + return manifests.CustomerSystemAdminSignerCA(namespace), true + case certificates.SREBreakGlassSigner: + return manifests.SRESystemAdminSignerCA(namespace), true + default: + return nil, false + } +} + +func (c *CertificateRevocationController) syncCertificateRevocationRequest(ctx context.Context, syncContext factory.SyncContext) error { + namespace, name, err := cache.SplitMetaNamespaceKey(syncContext.QueueKey()) + if err != nil { + return err + } + + action, requeue, err := c.processCertificateRevocationRequest(ctx, namespace, name, nil) + if err != nil { + return err + } + if requeue { + return factory.SyntheticRequeueError + } + if action != nil { + if err := action.validate(); err != nil { + panic(err) + } + if action.event != nil { + syncContext.Recorder().Eventf(action.event.reason, action.event.messageFmt, action.event.args...) + } + + // TODO: using force on secrets & CM since we're a different field manager - maybe collapse? 
+ switch { + case action.crr != nil: + _, err := c.hypershiftClient.CertificatesV1alpha1().CertificateRevocationRequests(*action.crr.Namespace).ApplyStatus(ctx, action.crr, metav1.ApplyOptions{FieldManager: c.fieldManager}) + return err + case action.secret != nil: + _, err := c.kubeClient.CoreV1().Secrets(*action.secret.Namespace).Apply(ctx, action.secret, metav1.ApplyOptions{FieldManager: c.fieldManager, Force: true}) + return err + case action.cm != nil: + _, err := c.kubeClient.CoreV1().ConfigMaps(*action.cm.Namespace).Apply(ctx, action.cm, metav1.ApplyOptions{FieldManager: c.fieldManager, Force: true}) + return err + } + } + + return nil +} + +type actions struct { + event *eventInfo + crr *certificatesv1alpha1applyconfigurations.CertificateRevocationRequestApplyConfiguration + secret *corev1applyconfigurations.SecretApplyConfiguration + cm *corev1applyconfigurations.ConfigMapApplyConfiguration +} + +func (a *actions) validate() error { + var set int + if a.crr != nil { + set += 1 + } + if a.cm != nil { + set += 1 + } + if a.secret != nil { + set += 1 + } + if set > 1 { + return errors.New("programmer error: more than one action set") + } + return nil +} + +type eventInfo struct { + reason, messageFmt string + args []interface{} +} + +func event(reason, messageFmt string, args ...interface{}) *eventInfo { + return &eventInfo{ + reason: reason, + messageFmt: messageFmt, + args: args, + } +} + +func (c *CertificateRevocationController) processCertificateRevocationRequest(ctx context.Context, namespace, name string, now func() time.Time) (*actions, bool, error) { + if now == nil { + now = time.Now + } + + crr, err := c.getCRR(namespace, name) + if apierrors.IsNotFound(err) { + return nil, false, nil // nothing to be done, CRR is gone + } + if err != nil { + return nil, false, err + } + + for _, step := range []revocationStep{ + // we haven't seen this CRR before, so choose a revocation timestamp + c.commitRevocationTimestamp, + // a revocation timestamp exists, 
so we need to ensure new certs are generated after that point + c.generateNewSignerCertificate, + // new certificates exist, we need to ensure they work against the API server + c.ensureNewSignerCertificatePropagated, + // new certificates exist and are accepted by the API server, we need to re-generate leaf certificates + c.generateNewLeafCertificates, + // new certificates propagated, time to remove all previous certificates from trust bundle + c.prunePreviousSignerCertificates, + // old certificates removed, time to ensure old certificate is rejected + c.ensureOldSignerCertificateRevoked, + } { + // each step either handles the current step or hands off to the next one + done, action, requeue, err := step(ctx, namespace, name, now, crr) + if done { + return action, requeue, err + } + } + // nothing to do + return nil, false, nil +} + +type revocationStep func(ctx context.Context, namespace string, name string, now func() time.Time, crr *certificatesv1alpha1.CertificateRevocationRequest) (bool, *actions, bool, error) + +func (c *CertificateRevocationController) commitRevocationTimestamp(ctx context.Context, namespace string, name string, now func() time.Time, crr *certificatesv1alpha1.CertificateRevocationRequest) (bool, *actions, bool, error) { + if !certificates.ValidSignerClass(crr.Spec.SignerClass) { + cfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(name, namespace) + cfg.Status = certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatus(). + WithConditions(conditions(crr.Status.Conditions, metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.SignerClassValidType). + WithStatus(metav1.ConditionFalse). + WithLastTransitionTime(metav1.NewTime(now())). + WithReason(certificatesv1alpha1.SignerClassUnknownReason). + WithMessage(fmt.Sprintf("Signer class %q unknown.", crr.Spec.SignerClass)), + )...) 
+ e := event("CertificateRevocationInvalid", "Signer class %q unknown.", crr.Spec.SignerClass) + return true, &actions{event: e, crr: cfg}, false, nil + } + + if crr.Status.RevocationTimestamp == nil { + revocationTimestamp := now() + cfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(name, namespace) + cfg.Status = certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatus(). + WithRevocationTimestamp(metav1.NewTime(revocationTimestamp)). + WithConditions(conditions(crr.Status.Conditions, metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.SignerClassValidType). + WithStatus(metav1.ConditionTrue). + WithLastTransitionTime(metav1.NewTime(now())). + WithReason(hypershiftv1beta1.AsExpectedReason). + WithMessage(fmt.Sprintf("Signer class %q known.", crr.Spec.SignerClass)), + )...) + e := event("CertificateRevocationStarted", "%q certificates valid before %s will be revoked.", crr.Spec.SignerClass, revocationTimestamp) + return true, &actions{event: e, crr: cfg}, false, nil + } + + return false, nil, false, nil +} + +func (c *CertificateRevocationController) generateNewSignerCertificate(ctx context.Context, namespace string, name string, now func() time.Time, crr *certificatesv1alpha1.CertificateRevocationRequest) (bool, *actions, bool, error) { + signer, ok := secretForSignerClass(namespace, certificates.SignerClass(crr.Spec.SignerClass)) + if !ok { + // we should never reach this case as we validate the class before transitioning states, and it's immutable + return true, nil, false, nil + } + + current, certs, err := c.loadCertificateSecret(signer.Namespace, signer.Name) + if err != nil { + return true, nil, false, err + } + + var certificateNeedsRegeneration bool + if len(certs) > 0 { + onlyAfter, beforeAndDuring := partitionCertificatesByValidity(certs, crr.Status.RevocationTimestamp.Time) + certificateNeedsRegeneration = len(beforeAndDuring) > 0 || len(onlyAfter) == 0 + } else { + // there's no 
certificate there, regenerate it + certificateNeedsRegeneration = true + } + + if certificateNeedsRegeneration { + // when we revoke a signer, we need to keep a copy of a previous leaf to verify that it's + // invalid when we're done revoking it + + // base36(sha224(value)) produces a useful, deterministic value that fits the requirements to be + // a Kubernetes object name (honoring length requirement, is a valid DNS subdomain, etc) + hash := sha256.Sum224([]byte(crr.Name)) + var i big.Int + i.SetBytes(hash[:]) + previousSignerName := i.Text(36) + _, err = c.getSecret(namespace, previousSignerName) + if err != nil && !apierrors.IsNotFound(err) { + return true, nil, false, err + } + if apierrors.IsNotFound(err) { + secretCfg := corev1applyconfigurations.Secret(previousSignerName, signer.Namespace). + WithOwnerReferences(metav1applyconfigurations.OwnerReference(). + WithAPIVersion(hypershiftv1beta1.GroupVersion.String()). + WithKind("CertificateRevocationRequest"). + WithName(crr.Name). + WithUID(crr.UID)). + WithType(corev1.SecretTypeTLS). + WithData(map[string][]byte{ + corev1.TLSCertKey: current.Data[corev1.TLSCertKey], + corev1.TLSPrivateKeyKey: current.Data[corev1.TLSPrivateKeyKey], + }) + e := event("CertificateRevocationProgressing", "Copying previous signer %s/%s to %s/%s.", signer.Namespace, signer.Name, namespace, previousSignerName) + return true, &actions{event: e, secret: secretCfg}, false, nil + } + + if crr.Status.PreviousSigner == nil { + cfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(name, namespace) + cfg.Status = certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatus(). + WithRevocationTimestamp(*crr.Status.RevocationTimestamp). + WithPreviousSigner(corev1.LocalObjectReference{Name: previousSignerName}). + WithConditions(conditions(crr.Status.Conditions, metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.RootCertificatesRegeneratedType). + WithStatus(metav1.ConditionFalse). 
+ WithLastTransitionTime(metav1.NewTime(now())). + WithReason(certificatesv1alpha1.RootCertificatesStaleReason). + WithMessage(fmt.Sprintf("Signer certificate %s/%s needs to be regenerated.", signer.Namespace, signer.Name)), + )...) + e := event("CertificateRevocationProgressing", "Recording reference to copied previous signer %s/%s.", namespace, previousSignerName) + return true, &actions{event: e, crr: cfg}, false, nil + } + + // SSA would allow us to simply send this indiscriminately, but regeneration takes time, and if we're + // reconciling after we've sent this annotation once and send it again, all we do is kick another round + // of regeneration, which is not helpful + if val, ok := current.ObjectMeta.Annotations[certrotation.CertificateNotAfterAnnotation]; !ok || val != "force-regeneration" { + secretCfg := corev1applyconfigurations.Secret(signer.Name, signer.Namespace). + WithAnnotations(map[string]string{ + certrotation.CertificateNotAfterAnnotation: "force-regeneration", + }) + e := event("CertificateRevocationProgressing", "Marking signer %s/%s for regeneration.", signer.Namespace, signer.Name) + return true, &actions{event: e, secret: secretCfg}, false, nil + } + return true, nil, false, nil + } + + var recorded bool + for _, condition := range crr.Status.Conditions { + if condition.Type == certificatesv1alpha1.RootCertificatesRegeneratedType && condition.Status == metav1.ConditionTrue { + recorded = true + break + } + } + if !recorded { + cfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(name, namespace) + cfg.Status = certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatus(). + WithRevocationTimestamp(*crr.Status.RevocationTimestamp). + WithPreviousSigner(*crr.Status.PreviousSigner). + WithConditions(conditions(crr.Status.Conditions, + metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.RootCertificatesRegeneratedType). + WithStatus(metav1.ConditionTrue). 
+ WithLastTransitionTime(metav1.NewTime(now())). + WithReason(hypershiftv1beta1.AsExpectedReason). + WithMessage(fmt.Sprintf("Signer certificate %s/%s regenerated.", signer.Namespace, signer.Name)), + metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.NewCertificatesTrustedType). + WithStatus(metav1.ConditionFalse). + WithLastTransitionTime(metav1.NewTime(now())). + WithReason(hypershiftv1beta1.WaitingForAvailableReason). + WithMessage(fmt.Sprintf("New signer certificate %s/%s not yet trusted.", signer.Namespace, signer.Name)), + )...) + e := event("CertificateRevocationProgressing", "New %q signer certificates generated.", crr.Spec.SignerClass) + return true, &actions{event: e, crr: cfg}, false, nil + } + + return false, nil, false, nil +} + +func (c *CertificateRevocationController) ensureNewSignerCertificatePropagated(ctx context.Context, namespace string, name string, now func() time.Time, crr *certificatesv1alpha1.CertificateRevocationRequest) (bool, *actions, bool, error) { + signer, ok := secretForSignerClass(namespace, certificates.SignerClass(crr.Spec.SignerClass)) + if !ok { + // we should never reach this case as we validate the class before transitioning states, and it's immutable + return true, nil, false, nil + } + + signerSecert, signers, err := c.loadCertificateSecret(signer.Namespace, signer.Name) + if err != nil { + return true, nil, false, err + } + if signers == nil { + return true, nil, false, nil + } + + currentCertPEM, ok := signerSecert.Data[corev1.TLSCertKey] + if !ok || len(currentCertPEM) == 0 { + return true, nil, false, fmt.Errorf("signer certificate %s/%s had no data for %s", signerSecert.Namespace, signerSecert.Name, corev1.TLSCertKey) + } + + currentKeyPEM, ok := signerSecert.Data[corev1.TLSPrivateKeyKey] + if !ok || len(currentKeyPEM) == 0 { + return true, nil, false, fmt.Errorf("signer certificate %s/%s had no data for %s", signerSecert.Namespace, signerSecert.Name, corev1.TLSPrivateKeyKey) + } + + totalClientCA 
:= manifests.TotalKASClientCABundle(namespace) + totalClientTrustBundle, err := c.loadTrustBundleConfigMap(totalClientCA.Namespace, totalClientCA.Name) + if err != nil { + return true, nil, false, err + } + if totalClientTrustBundle == nil { + return true, nil, false, nil + } + + // the real gate for this phase is that KAS has loaded the updated trust bundle and now + // authorizes clients using certificates signed by the new signer - it is difficult to unit-test + // that, though, and it's always valid to first check that our certificates have propagated as far + // as we can tell in the system before asking the KAS, since that's expensive + if len(trustedCertificates(totalClientTrustBundle, []*certificateSecret{{cert: signers[0]}}, now)) == 0 { + return true, nil, false, nil + } + + // if the updated trust bundle has propagated as far as we can tell, let's go ahead and ask + // KAS to detect when it trusts the new signer + if !c.skipKASConnections { + kubeconfig := hcpmanifests.KASServiceKubeconfigSecret(namespace) + kubeconfigSecret, err := c.getSecret(kubeconfig.Namespace, kubeconfig.Name) + if err != nil { + return true, nil, false, fmt.Errorf("couldn't fetch guest cluster service network kubeconfig: %w", err) + } + adminClientCfg, err := clientcmd.NewClientConfigFromBytes(kubeconfigSecret.Data["kubeconfig"]) + if err != nil { + return true, nil, false, fmt.Errorf("couldn't load guest cluster service network kubeconfig: %w", err) + } + adminCfg, err := adminClientCfg.ClientConfig() + if err != nil { + return true, nil, false, fmt.Errorf("couldn't load guest cluster service network kubeconfig: %w", err) + } + certCfg := rest.AnonymousClientConfig(adminCfg) + certCfg.TLSClientConfig.CertData = currentCertPEM + certCfg.TLSClientConfig.KeyData = currentKeyPEM + + testClient, err := kubernetes.NewForConfig(certCfg) + if err != nil { + return true, nil, false, fmt.Errorf("couldn't create guest cluster client using old certificate: %w", err) + } + + _, err = 
testClient.AuthenticationV1().SelfSubjectReviews().Create(ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{}) + if apierrors.IsUnauthorized(err) { + // this is OK, things are just propagating still + return true, nil, true, nil // we need to synthetically re-queue since nothing about KAS loading will trigger us + } + if err != nil { + return true, nil, false, fmt.Errorf("couldn't send SSR to guest cluster: %w", err) + } + } + + var recorded bool + for _, condition := range crr.Status.Conditions { + if condition.Type == certificatesv1alpha1.NewCertificatesTrustedType && condition.Status == metav1.ConditionTrue { + recorded = true + break + } + } + if !recorded { + cfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(name, namespace) + cfg.Status = certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatus(). + WithRevocationTimestamp(*crr.Status.RevocationTimestamp). + WithPreviousSigner(*crr.Status.PreviousSigner). + WithConditions(conditions(crr.Status.Conditions, metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.NewCertificatesTrustedType). + WithStatus(metav1.ConditionTrue). + WithLastTransitionTime(metav1.NewTime(now())). + WithReason(hypershiftv1beta1.AsExpectedReason). + WithMessage(fmt.Sprintf("New signer certificate %s/%s trusted.", signer.Namespace, signer.Name)), + )...) 
+ e := event("CertificateRevocationProgressing", "New %q signer certificates valid.", crr.Spec.SignerClass) + return true, &actions{event: e, crr: cfg}, false, nil + } + + return false, nil, false, nil +} + +func (c *CertificateRevocationController) generateNewLeafCertificates(ctx context.Context, namespace string, name string, now func() time.Time, crr *certificatesv1alpha1.CertificateRevocationRequest) (bool, *actions, bool, error) { + signer, ok := secretForSignerClass(namespace, certificates.SignerClass(crr.Spec.SignerClass)) + if !ok { + // we should never reach this case as we validate the class before transitioning states, and it's immutable + return true, nil, false, nil + } + secrets, err := c.listSecrets(signer.Namespace) + if err != nil { + return true, nil, false, err + } + + var currentIssuer string + for _, secret := range secrets { + if secret.Name == signer.Name { + currentIssuer = secret.ObjectMeta.Annotations[certrotation.CertificateIssuer] + break + } + } + + currentIssuerName, currentIssuerTimestamp, err := parseIssuer(currentIssuer) + if err != nil { + return true, nil, false, fmt.Errorf("signer %s/%s metadata.annotations[%s] malformed: %w", signer.Namespace, signer.Name, certrotation.CertificateIssuer, err) + } + + for _, secret := range secrets { + issuer, set := secret.ObjectMeta.Annotations[certrotation.CertificateIssuer] + if !set { + continue + } + issuerName, issuerTimestamp, err := parseIssuer(issuer) + if err != nil { + return true, nil, false, fmt.Errorf("certificate %s/%s metadata.annotations[%s] malformed: %w", secret.Namespace, secret.Name, certrotation.CertificateIssuer, err) + } + if issuerName == currentIssuerName && issuerTimestamp.Before(currentIssuerTimestamp) { + secretCfg := corev1applyconfigurations.Secret(secret.Name, secret.Namespace). 
+ WithAnnotations(map[string]string{ + certrotation.CertificateNotAfterAnnotation: "force-regeneration", + }) + e := event("CertificateRevocationProgressing", "Marking certificate %s/%s for regeneration.", secret.Namespace, secret.Name) + return true, &actions{event: e, secret: secretCfg}, false, nil + } + } + + return false, nil, false, nil +} + +func (c *CertificateRevocationController) prunePreviousSignerCertificates(ctx context.Context, namespace string, name string, now func() time.Time, crr *certificatesv1alpha1.CertificateRevocationRequest) (bool, *actions, bool, error) { + trustBundleCA, ok := configMapForSignerClass(namespace, certificates.SignerClass(crr.Spec.SignerClass)) + if !ok { + // we should never reach this case as we validate the class before transitioning states, and it's immutable + return true, nil, false, nil + } + + currentTrustBundle, err := c.loadTrustBundleConfigMap(trustBundleCA.Namespace, trustBundleCA.Name) + if err != nil { + return true, nil, false, err + } + if currentTrustBundle == nil { + return true, nil, false, nil + } + + onlyAfter, _ := partitionCertificatesByValidity(currentTrustBundle, crr.Status.RevocationTimestamp.Time) + if len(onlyAfter) != len(currentTrustBundle) { + // we need to prune the trust bundle, but first, we need to ensure that all leaf certificates + // trusted by the current bundle continue to be trusted by the filtered bundle; all leaves must + // have been regenerated for us to revoke the old certificates + secrets, err := c.listSecrets(namespace) + if err != nil { + return true, nil, false, fmt.Errorf("failed to list secrets: %w", err) + } + + var existingLeafCerts []*certificateSecret + for _, secret := range secrets { + certKeyInfo, err := certgraphanalysis.InspectSecret(secret) + if err != nil { + klog.Warningf("failed to load cert/key pair from secret %s/%s: %v", secret.Namespace, secret.Name, err) + continue + } + if certKeyInfo == nil { + continue + } + + certs, err := 
certutil.ParseCertsPEM(secret.Data[corev1.TLSCertKey]) + if err != nil { + return true, nil, false, fmt.Errorf("could not parse certificate in secret %s/%s: %w", secret.Namespace, secret.Name, err) + } + + if isLeafCertificate(certKeyInfo) { + existingLeafCerts = append(existingLeafCerts, &certificateSecret{ + namespace: secret.Namespace, + name: secret.Name, + cert: certs[0], + }) + } + } + + currentlyTrustedLeaves := trustedCertificates(currentTrustBundle, existingLeafCerts, now) + futureTrustedLeaves := trustedCertificates(onlyAfter, existingLeafCerts, now) + + if diff := certificateSecretNames(currentlyTrustedLeaves).Difference(certificateSecretNames(futureTrustedLeaves)); diff.Len() != 0 { + list := diff.UnsortedList() + sort.Strings(list) + cfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(name, namespace) + cfg.Status = certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatus(). + WithRevocationTimestamp(*crr.Status.RevocationTimestamp). + WithPreviousSigner(*crr.Status.PreviousSigner). + WithConditions( + conditions(crr.Status.Conditions, metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.LeafCertificatesRegeneratedType). + WithStatus(metav1.ConditionFalse). + WithLastTransitionTime(metav1.NewTime(now())). + WithReason(certificatesv1alpha1.LeafCertificatesStaleReason). + WithMessage(fmt.Sprintf("Revocation would lose trust for leaf certificates: %v.", strings.Join(list, ", "))), + )..., + ) + e := event("CertificateRevocationProgressing", "Waiting for leaf certificates %v to regenerate.", strings.Join(list, ", ")) + return true, &actions{event: e, crr: cfg}, false, nil + } + + newBundlePEM, err := certutil.EncodeCertificates(onlyAfter...) 
+ if err != nil { + return true, nil, false, fmt.Errorf("failed to encode new cert bundle for configmap %s/%s: %w", trustBundleCA.Name, trustBundleCA.Namespace, err) + } + + caCfg := corev1applyconfigurations.ConfigMap(trustBundleCA.Name, trustBundleCA.Namespace) + caCfg.WithData(map[string]string{ + "ca-bundle.crt": string(newBundlePEM), + }) + e := event("CertificateRevocationProgressing", "Pruning previous %q signer certificates from CA bundle.", crr.Spec.SignerClass) + return true, &actions{event: e, cm: caCfg}, false, nil + } + + var recorded bool + for _, condition := range crr.Status.Conditions { + if condition.Type == certificatesv1alpha1.LeafCertificatesRegeneratedType && condition.Status == metav1.ConditionTrue { + recorded = true + break + } + } + if !recorded { + // we're already pruned, we can continue + cfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(name, namespace) + cfg.Status = certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatus(). + WithRevocationTimestamp(*crr.Status.RevocationTimestamp). + WithPreviousSigner(*crr.Status.PreviousSigner). + WithConditions( + conditions(crr.Status.Conditions, + metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.LeafCertificatesRegeneratedType). + WithStatus(metav1.ConditionTrue). + WithLastTransitionTime(metav1.NewTime(now())). + WithReason(hypershiftv1beta1.AsExpectedReason). + WithMessage("All leaf certificates are re-generated."), + metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.PreviousCertificatesRevokedType). + WithStatus(metav1.ConditionFalse). + WithLastTransitionTime(metav1.NewTime(now())). + WithReason(hypershiftv1beta1.WaitingForAvailableReason). 
+ WithMessage("Previous signer certificate not yet revoked."), + )..., + ) + e := event("CertificateRevocationProgressing", "Previous %q signer certificates pruned.", crr.Spec.SignerClass) + return true, &actions{event: e, crr: cfg}, false, nil + } + + return false, nil, false, nil +} + +func (c *CertificateRevocationController) ensureOldSignerCertificateRevoked(ctx context.Context, namespace string, name string, now func() time.Time, crr *certificatesv1alpha1.CertificateRevocationRequest) (bool, *actions, bool, error) { + oldCertSecret, err := c.getSecret(namespace, crr.Status.PreviousSigner.Name) + if err != nil { + return true, nil, false, err + } + + oldCertPEM, ok := oldCertSecret.Data[corev1.TLSCertKey] + if !ok || len(oldCertPEM) == 0 { + return true, nil, false, fmt.Errorf("signer certificate %s/%s had no data for %s", oldCertSecret.Namespace, oldCertSecret.Name, corev1.TLSCertKey) + } + + oldCerts, err := certutil.ParseCertsPEM(oldCertPEM) + if err != nil { + return true, nil, false, err + } + + oldKeyPEM, ok := oldCertSecret.Data[corev1.TLSPrivateKeyKey] + if !ok || len(oldKeyPEM) == 0 { + return true, nil, false, fmt.Errorf("signer certificate %s/%s had no data for %s", oldCertSecret.Namespace, oldCertSecret.Name, corev1.TLSPrivateKeyKey) + } + + totalClientCA := manifests.TotalKASClientCABundle(namespace) + totalClientTrustBundle, err := c.loadTrustBundleConfigMap(totalClientCA.Namespace, totalClientCA.Name) + if err != nil { + return true, nil, false, err + } + if totalClientTrustBundle == nil { + return true, nil, false, nil + } + // the real gate for this phase is that KAS has loaded the updated trust bundle and no longer + // authorizes clients using certificates signed by the revoked signer - it is difficult to unit-test + // that, though, and it's always valid to first check that our certificates have propagated as far + // as we can tell in the system before asking the KAS, since that's expensive + if 
len(trustedCertificates(totalClientTrustBundle, []*certificateSecret{{cert: oldCerts[0]}}, now)) != 0 { + return true, nil, false, nil + } + + // if the updated trust bundle has propagated as far as we can tell, let's go ahead and ask + // KAS to ensure it no longer trusts the old signer + if !c.skipKASConnections { + kubeconfig := hcpmanifests.KASServiceKubeconfigSecret(namespace) + kubeconfigSecret, err := c.getSecret(kubeconfig.Namespace, kubeconfig.Name) + if err != nil { + return true, nil, false, fmt.Errorf("couldn't fetch guest cluster service network kubeconfig: %w", err) + } + adminClientCfg, err := clientcmd.NewClientConfigFromBytes(kubeconfigSecret.Data["kubeconfig"]) + if err != nil { + return true, nil, false, fmt.Errorf("couldn't load guest cluster service network kubeconfig: %w", err) + } + adminCfg, err := adminClientCfg.ClientConfig() + if err != nil { + return true, nil, false, fmt.Errorf("couldn't load guest cluster service network kubeconfig: %w", err) + } + certCfg := rest.AnonymousClientConfig(adminCfg) + certCfg.TLSClientConfig.CertData = oldCertPEM + certCfg.TLSClientConfig.KeyData = oldKeyPEM + + testClient, err := kubernetes.NewForConfig(certCfg) + if err != nil { + return true, nil, false, fmt.Errorf("couldn't create guest cluster client using old certificate: %w", err) + } + + _, err = testClient.AuthenticationV1().SelfSubjectReviews().Create(ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{}) + if err == nil { + // this is OK, things are just propagating still + return true, nil, true, nil // we need to synthetically re-queue since nothing about KAS loading will trigger us + } + if err != nil && !apierrors.IsUnauthorized(err) { + return true, nil, false, fmt.Errorf("couldn't send SSR to guest cluster: %w", err) + } + } + + var recorded bool + for _, condition := range crr.Status.Conditions { + if condition.Type == certificatesv1alpha1.PreviousCertificatesRevokedType && condition.Status == metav1.ConditionTrue { + 
recorded = true + break + } + } + if !recorded { + cfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(name, namespace) + cfg.Status = certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatus(). + WithRevocationTimestamp(*crr.Status.RevocationTimestamp). + WithPreviousSigner(*crr.Status.PreviousSigner). + WithConditions(conditions(crr.Status.Conditions, + metav1applyconfigurations.Condition(). + WithType(certificatesv1alpha1.PreviousCertificatesRevokedType). + WithStatus(metav1.ConditionTrue). + WithLastTransitionTime(metav1.NewTime(now())). + WithReason(hypershiftv1beta1.AsExpectedReason). + WithMessage("Previous signer certificate revoked."), + )...) + e := event("CertificateRevocationComplete", "%q signer certificates revoked.", crr.Spec.SignerClass) + return true, &actions{event: e, crr: cfg}, false, nil + } + return false, nil, false, nil +} + +func (c *CertificateRevocationController) loadCertificateSecret(namespace, name string) (*corev1.Secret, []*x509.Certificate, error) { + secret, err := c.getSecret(namespace, name) + if apierrors.IsNotFound(err) { + return nil, nil, nil // try again later + } + if err != nil { + return nil, nil, fmt.Errorf("could not fetch client cert secret %s/%s: %w", namespace, name, err) + } + + clientCertPEM, ok := secret.Data[corev1.TLSCertKey] + if !ok || len(clientCertPEM) == 0 { + return nil, nil, fmt.Errorf("found no certificate in secret %s/%s: %w", namespace, name, err) + } + + clientCertificates, err := certutil.ParseCertsPEM(clientCertPEM) + if err != nil { + return nil, nil, fmt.Errorf("could not parse certificate in secret %s/%s: %w", namespace, name, err) + } + + return secret, clientCertificates, nil +} + +func (c *CertificateRevocationController) loadTrustBundleConfigMap(namespace, name string) ([]*x509.Certificate, error) { + configMap, err := c.getConfigMap(namespace, name) + if apierrors.IsNotFound(err) { + return nil, nil // try again later + } + if err != nil { + return 
nil, fmt.Errorf("could not fetch configmap %s/%s: %w", namespace, name, err) + } + + caPEM, ok := configMap.Data["ca-bundle.crt"] + if !ok || len(caPEM) == 0 { + return nil, fmt.Errorf("found no trust bundle in configmap %s/%s: %w", namespace, name, err) + } + + trustBundle, err := certutil.ParseCertsPEM([]byte(caPEM)) + if err != nil { + return nil, fmt.Errorf("could not parse trust bundle in configmap %s/%s: %w", namespace, name, err) + } + + return trustBundle, nil +} + +func certificateSecretNames(leaves []*certificateSecret) sets.Set[string] { + names := sets.Set[string]{} + for _, leaf := range leaves { + names.Insert(fmt.Sprintf("%s/%s", leaf.namespace, leaf.name)) + } + return names +} + +type certificateSecret struct { + namespace, name string + cert *x509.Certificate +} + +func isLeafCertificate(certKeyPair *certgraphapi.CertKeyPair) bool { + if certKeyPair.Spec.Details.SignerDetails != nil { + return false + } + + issuerInfo := certKeyPair.Spec.CertMetadata.CertIdentifier.Issuer + if issuerInfo == nil { + return false + } + + // a certificate that's not self-signed is a leaf + return issuerInfo.CommonName != certKeyPair.Spec.CertMetadata.CertIdentifier.CommonName +} + +func trustedCertificates(trustBundle []*x509.Certificate, secrets []*certificateSecret, now func() time.Time) []*certificateSecret { + trustPool := x509.NewCertPool() + + for i := range trustBundle { + trustPool.AddCert(trustBundle[i]) + } + + verifyOpts := x509.VerifyOptions{ + Roots: trustPool, + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + CurrentTime: now(), + } + + var trusted []*certificateSecret + for i := range secrets { + if _, err := secrets[i].cert.Verify(verifyOpts); err == nil { + trusted = append(trusted, secrets[i]) + } + } + return trusted +} + +// partitionCertificatesByValidity partitions certsPEM into two disjoint sets of certificates: +// those only valid after the cutoff and those valid at or before the cutoff +func partitionCertificatesByValidity(certs 
[]*x509.Certificate, cutoff time.Time) ([]*x509.Certificate, []*x509.Certificate) { + var onlyAfter, beforeAndDuring []*x509.Certificate + for _, cert := range certs { + if cert.NotBefore.After(cutoff) { + onlyAfter = append(onlyAfter, cert) + } else { + beforeAndDuring = append(beforeAndDuring, cert) + } + } + + return onlyAfter, beforeAndDuring +} + +// conditions provides the full list of conditions that we need to send with each SSA call - +// if one field manager sets some conditions in one call, and another set in a second, any conditions +// provided in the first but not the second will be removed. Therefore, we need to provide the whole +// list of conditions on each call. Since we are the only actor to add conditions to this resource, +// we can accumulate all current conditions and simply append the new one, or overwrite a current +// condition if we're updating the content for that type. +func conditions(existing []metav1.Condition, updated ...*metav1applyconfigurations.ConditionApplyConfiguration) []*metav1applyconfigurations.ConditionApplyConfiguration { + updatedTypes := sets.New[string]() + for _, condition := range updated { + if condition.Type == nil { + panic(fmt.Errorf("programmer error: must set a type for condition: %#v", condition)) + } + updatedTypes.Insert(*condition.Type) + } + conditions := updated + for _, condition := range existing { + if !updatedTypes.Has(condition.Type) { + conditions = append(conditions, metav1applyconfigurations.Condition(). + WithType(condition.Type). + WithStatus(condition.Status). + WithObservedGeneration(condition.ObservedGeneration). + WithLastTransitionTime(condition.LastTransitionTime). + WithReason(condition.Reason). + WithMessage(condition.Message), + ) + } + } + return conditions +} + +// parseIssuer parses an issuer identifier like "namespace_name-signer@1705510729" +// into the issuer name (namespace_name-issuer) and the timestamp (as unix seconds). 
+// These are created in library-go with:
+// signerName := fmt.Sprintf("%s-signer@%d", c.componentName, time.Now().Unix())
+func parseIssuer(issuer string) (string, time.Time, error) {
+	issuerParts := strings.Split(issuer, "@")
+	if len(issuerParts) != 2 {
+		return "", time.Time{}, fmt.Errorf("issuer %q malformed: splitting by '@' resulted in %d parts, not 2", issuer, len(issuerParts))
+	}
+	issuerName, issuerTimestamp := issuerParts[0], issuerParts[1]
+	issuerTimestampSeconds, err := strconv.ParseInt(issuerTimestamp, 10, 64)
+	if err != nil {
+		return "", time.Time{}, fmt.Errorf("issuer timestamp %q malformed: %w", issuerTimestamp, err)
+	}
+	return issuerName, time.Unix(issuerTimestampSeconds, 0), nil
+}
diff --git a/control-plane-pki-operator/certificaterevocationcontroller/certificaterevocationcontroller_test.go b/control-plane-pki-operator/certificaterevocationcontroller/certificaterevocationcontroller_test.go
new file mode 100644
index 0000000000..ea99215c8a
--- /dev/null
+++ b/control-plane-pki-operator/certificaterevocationcontroller/certificaterevocationcontroller_test.go
@@ -0,0 +1,1076 @@
+package certificaterevocationcontroller
+
+import (
+	"context"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"embed"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1"
+	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
+	certificatesv1alpha1applyconfigurations "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1"
+	"github.com/openshift/hypershift/control-plane-pki-operator/certificates"
+	"github.com/openshift/hypershift/control-plane-pki-operator/manifests"
+	librarygocrypto "github.com/openshift/library-go/pkg/crypto"
+	"github.com/openshift/library-go/pkg/operator/certrotation"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1applyconfigurations "k8s.io/client-go/applyconfigurations/core/v1"
+	metav1applyconfigurations "k8s.io/client-go/applyconfigurations/meta/v1"
+	"k8s.io/client-go/util/cert"
+	testingclock "k8s.io/utils/clock/testing"
+	"k8s.io/utils/ptr"
+)
+
+// generating lots of PKI in environments where compute and/or entropy is limited (like in test containers)
+// can be very slow - instead, we use precomputed PKI and allow for re-generating it if necessary
+//
+//go:embed testdata
+var testdata embed.FS
+
+type pkiContainer struct {
+	signer        *librarygocrypto.TLSCertificateConfig
+	clientCertKey *librarygocrypto.TLSCertificateConfig
+	signedCert    *x509.Certificate
+
+	raw *rawPKIContainer
+}
+
+type testData struct {
+	original, future *pkiContainer
+}
+
+type rawPKIContainer struct {
+	signerCert, signerKey []byte
+	clientCert, clientKey []byte //todo: only need pkey for client
+	signedCert            []byte
+}
+
+var revocationOffset = 1 * 365 * 24 * time.Hour
+
+func pki(t *testing.T, rotationTime time.Time) *testData {
+	td := &testData{
+		original: &pkiContainer{
+			raw: &rawPKIContainer{},
+		},
+		future: &pkiContainer{
+			raw: &rawPKIContainer{},
+		},
+	}
+	for when, into := range map[time.Time]struct {
+		name string
+		cfg  *pkiContainer
+	}{
+		rotationTime.Add(-revocationOffset): {name: "original", cfg: td.original},
+		rotationTime.Add(revocationOffset):  {name: "future", cfg: td.future},
+	} {
+		into.cfg.raw.signerKey, into.cfg.raw.signerCert = certificateAuthorityRaw(t, into.name, testingclock.NewFakeClock(when).Now)
+		signer, err := librarygocrypto.GetCAFromBytes(into.cfg.raw.signerCert, into.cfg.raw.signerKey)
+		if err != nil {
+			t.Fatalf("error parsing signer CA cert and key: %v", err)
+		}
+		into.cfg.signer = signer.Config
+
+		into.cfg.raw.clientKey, into.cfg.raw.clientCert = certificateAuthorityRaw(t, into.name+"-client", testingclock.NewFakeClock(when).Now)
+		client, err := librarygocrypto.GetCAFromBytes(into.cfg.raw.clientCert, into.cfg.raw.clientKey)
+		if err != nil {
+			t.Fatalf("error parsing client cert and key: %v", err)
+		}
+		into.cfg.clientCertKey = client.Config
+
+		if os.Getenv("REGENERATE_PKI") != "" {
+			t.Log("$REGENERATE_PKI set, generating a new signed certificate")
+			signedCert, err := signer.SignCertificate(&x509.Certificate{
+				Subject: pkix.Name{
+					CommonName:   "customer-break-glass-test-whatever",
+					Organization: []string{"system:masters"},
+				},
+				NotBefore:             signer.Config.Certs[0].NotBefore,
+				NotAfter:              signer.Config.Certs[0].NotAfter,
+				KeyUsage:              x509.KeyUsageDigitalSignature,
+				ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
+				BasicConstraintsValid: true,
+				IsCA:                  false,
+			}, client.Config.Certs[0].PublicKey)
+			if err != nil {
+				t.Fatalf("couldn't sign the client's certificate with the signer: %v", err)
+			}
+			into.cfg.signedCert = signedCert
+
+			certPEM, err := librarygocrypto.EncodeCertificates(signedCert)
+			if err != nil {
+				t.Fatalf("couldn't encode signed cert: %v", err)
+			}
+			if err := os.WriteFile(filepath.Join("testdata", into.name+"-client.signed.tls.crt"), certPEM, 0666); err != nil {
+				t.Fatalf("failed to write re-generated certificate: %v", err)
+			}
+			into.cfg.raw.signedCert = certPEM
+		} else {
+			t.Log("loading signed certificate from disk, use $REGENERATE_PKI to generate a new one")
+			pemb, err := testdata.ReadFile(filepath.Join("testdata", into.name+"-client.signed.tls.crt"))
+			if err != nil {
+				t.Fatalf("failed to read signed cert: %v", err)
+			}
+			certs, err := cert.ParseCertsPEM(pemb)
+			if err != nil {
+				t.Fatalf("failed to parse signed cert: %v", err)
+			}
+			if len(certs) != 1 {
+				t.Fatalf("got %d signed certs, expected one", len(certs))
+			}
+			into.cfg.signedCert = certs[0]
+			into.cfg.raw.signedCert = pemb
+		}
+	}
+
+	return td
+
+}
+
+func certificateAuthorityRaw(t *testing.T, prefix string, now func() time.Time) ([]byte, []byte) {
+	if os.Getenv("REGENERATE_PKI") != "" {
+		t.Log("$REGENERATE_PKI set, generating a new cert/key pair")
+		cfg, err := librarygocrypto.UnsafeMakeSelfSignedCAConfigForDurationAtTime("test-signer", now, time.Hour*24*365*100)
+		if err != nil {
+			t.Fatalf("could not generate self-signed CA: %v", err)
+		}
+
+		certb, keyb, err := cfg.GetPEMBytes()
+		if err != nil {
+			t.Fatalf("failed to marshal CA cert and key: %v", err)
+		}
+
+		if err := os.WriteFile(filepath.Join("testdata", prefix+".tls.key"), keyb, 0666); err != nil {
+			t.Fatalf("failed to write re-generated private key: %v", err)
+		}
+
+		if err := os.WriteFile(filepath.Join("testdata", prefix+".tls.crt"), certb, 0666); err != nil {
+			t.Fatalf("failed to write re-generated certificate: %v", err)
+		}
+
+		return keyb, certb
+	}
+
+	t.Log("loading certificate/key pair from disk, use $REGENERATE_PKI to generate new ones")
+	keyPem, err := testdata.ReadFile(filepath.Join("testdata", prefix+".tls.key"))
+	if err != nil {
+		t.Fatalf("failed to read private key: %v", err)
+	}
+	certPem, err := testdata.ReadFile(filepath.Join("testdata", prefix+".tls.crt"))
+	if err != nil {
+		t.Fatalf("failed to read certificate: %v", err)
+	}
+	return keyPem, certPem
+}
+
+func TestCertificateRevocationController_processCertificateRevocationRequest(t *testing.T) {
+	revocationTime, err := time.Parse(time.RFC3339Nano, "2006-01-02T15:04:05.999999999Z")
+	if err != nil {
+		t.Fatalf("could not parse time: %v", err)
+	}
+	revocationClock := testingclock.NewFakeClock(revocationTime)
+	postRevocationClock := testingclock.NewFakeClock(revocationTime.Add(revocationOffset + 1*time.Hour))
+
+	data := pki(t, revocationTime)
+
+	for _, testCase := range []struct {
+		name                  string
+		crrNamespace, crrName string
+		crr                   *certificatesv1alpha1.CertificateRevocationRequest
+		secrets               []*corev1.Secret
+		cm                    *corev1.ConfigMap
+		cms                   []*corev1.ConfigMap
+		now                   func() time.Time
+
+		expectedErr     bool
+		expectedRequeue bool
+		expected        *actions
+	}{
+		{
+			name:         "invalid signer class is flagged",
+			now:          revocationClock.Now,
+			crrNamespace: "crr-ns",
+			crrName:      "crr-name",
+			crr:
&certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: "invalid"}, + }, + expected: &actions{ + crr: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Namespace: ptr.To("crr-ns"), + Name: ptr.To("crr-name"), + }, + Status: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatusApplyConfiguration{ + Conditions: []metav1applyconfigurations.ConditionApplyConfiguration{{ + Type: ptr.To(certificatesv1alpha1.SignerClassValidType), + Status: ptr.To(metav1.ConditionFalse), + LastTransitionTime: ptr.To(metav1.NewTime(revocationClock.Now())), + Reason: ptr.To(certificatesv1alpha1.SignerClassUnknownReason), + Message: ptr.To(`Signer class "invalid" unknown.`), + }}, + }, + }, + }, + }, + { + name: "a timestamp is chosen if one does not exist", + now: revocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + }, + expected: &actions{ + crr: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Namespace: ptr.To("crr-ns"), + Name: ptr.To("crr-name"), + }, + Status: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatusApplyConfiguration{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + Conditions: []metav1applyconfigurations.ConditionApplyConfiguration{{ + Type: ptr.To(certificatesv1alpha1.SignerClassValidType), + Status: ptr.To(metav1.ConditionTrue), + 
LastTransitionTime: ptr.To(metav1.NewTime(revocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`Signer class "customer-break-glass" known.`), + }}, + }, + }, + }, + }, + { + name: "current signer is copied if none exists", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }}, + expected: &actions{ + secret: &corev1applyconfigurations.SecretApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Name: ptr.To("1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"), + Namespace: ptr.To("crr-ns"), + OwnerReferences: []metav1applyconfigurations.OwnerReferenceApplyConfiguration{{ + APIVersion: ptr.To(hypershiftv1beta1.SchemeGroupVersion.String()), + Kind: ptr.To("CertificateRevocationRequest"), + Name: ptr.To("crr-name"), + }}, + }, + Type: ptr.To(corev1.SecretTypeTLS), + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }, + }, + }, + { + name: "status updated to contain copied signer when copy exists", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: 
"crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }}, + expected: &actions{ + crr: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Namespace: ptr.To("crr-ns"), + Name: ptr.To("crr-name"), + }, + Status: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatusApplyConfiguration{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{ + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Conditions: []metav1applyconfigurations.ConditionApplyConfiguration{{ + Type: ptr.To(certificatesv1alpha1.RootCertificatesRegeneratedType), + Status: ptr.To(metav1.ConditionFalse), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(certificatesv1alpha1.RootCertificatesStaleReason), + Message: ptr.To(`Signer certificate crr-ns/customer-system-admin-signer needs to be regenerated.`), + }}, + }, + }, + }, + }, + { + name: "copies finished means we annotate for regeneration", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: 
"crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }}, + expected: &actions{ + secret: &corev1applyconfigurations.SecretApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Name: ptr.To(manifests.CustomerSystemAdminSigner("").Name), + Namespace: ptr.To("crr-ns"), + Annotations: map[string]string{ + certrotation.CertificateNotAfterAnnotation: "force-regeneration", + }, + }, + }, + }, + }, + { + name: "new signer generated, mark as such", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + 
PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signerCert, + corev1.TLSPrivateKeyKey: data.future.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }}, + expected: &actions{ + crr: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Namespace: ptr.To("crr-ns"), + Name: ptr.To("crr-name"), + }, + Status: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatusApplyConfiguration{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{ + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Conditions: []metav1applyconfigurations.ConditionApplyConfiguration{{ + Type: ptr.To(certificatesv1alpha1.RootCertificatesRegeneratedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`Signer certificate crr-ns/customer-system-admin-signer regenerated.`), + }, { + Type: ptr.To(certificatesv1alpha1.NewCertificatesTrustedType), + Status: ptr.To(metav1.ConditionFalse), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.WaitingForAvailableReason), + Message: ptr.To(`New signer certificate crr-ns/customer-system-admin-signer not yet trusted.`), + }}, + }, + }, + }, + }, + { + name: 
"not yet propagated, nothing to do", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + Conditions: []metav1.Condition{{ + Type: certificatesv1alpha1.RootCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `Signer certificate crr-ns/customer-system-admin-signer regenerated.`, + }}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signerCert, + corev1.TLSPrivateKeyKey: data.future.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }}, + cms: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.TotalKASClientCABundle("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.original.raw.signerCert), + }, + }}, + }, + { + name: "propagated, mark as trusted", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: 
certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + Conditions: []metav1.Condition{{ + Type: certificatesv1alpha1.RootCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `Signer certificate crr-ns/customer-system-admin-signer regenerated.`, + }}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signerCert, + corev1.TLSPrivateKeyKey: data.future.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }}, + cms: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.TotalKASClientCABundle("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.original.raw.signerCert) + string(data.future.raw.signerCert), + }, + }}, + expected: &actions{ + crr: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Namespace: ptr.To("crr-ns"), + Name: ptr.To("crr-name"), + }, + Status: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatusApplyConfiguration{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{ 
+ Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Conditions: []metav1applyconfigurations.ConditionApplyConfiguration{{ + Type: ptr.To(certificatesv1alpha1.NewCertificatesTrustedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`New signer certificate crr-ns/customer-system-admin-signer trusted.`), + }, { + Type: ptr.To(certificatesv1alpha1.RootCertificatesRegeneratedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`Signer certificate crr-ns/customer-system-admin-signer regenerated.`), + }}, + }, + }, + }, + }, + { + name: "leaf certificate not yet regenerated, annotate them", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + Conditions: []metav1.Condition{{ + Type: certificatesv1alpha1.RootCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `Signer certificate crr-ns/customer-system-admin-signer regenerated.`, + }, { + Type: certificatesv1alpha1.NewCertificatesTrustedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: 
`New signer certificate crr-ns/customer-system-admin-signer trusted.`, + }}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signerCert, + corev1.TLSPrivateKeyKey: data.future.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminClientCertSecret("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@0123"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signedCert, + corev1.TLSPrivateKeyKey: data.original.raw.clientKey, + }, + }}, + cms: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.CustomerSystemAdminSignerCA("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.original.raw.signerCert) + string(data.future.raw.signerCert), + }, + }, { + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.TotalKASClientCABundle("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.original.raw.signerCert) + string(data.future.raw.signerCert), + }, + }}, + expected: &actions{ + secret: &corev1applyconfigurations.SecretApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Name: ptr.To(manifests.CustomerSystemAdminClientCertSecret("").Name), + Namespace: ptr.To("crr-ns"), + Annotations: map[string]string{ + certrotation.CertificateNotAfterAnnotation: 
"force-regeneration", + }, + }, + }, + }, + }, + { + name: "leaf certificate already regenerated", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + Conditions: []metav1.Condition{{ + Type: certificatesv1alpha1.RootCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `Signer certificate crr-ns/customer-system-admin-signer regenerated.`, + }, { + Type: certificatesv1alpha1.NewCertificatesTrustedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `New signer certificate crr-ns/customer-system-admin-signer trusted.`, + }}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signerCert, + corev1.TLSPrivateKeyKey: data.future.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminClientCertSecret("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signedCert, + corev1.TLSPrivateKeyKey: data.future.raw.clientKey, + }, + }}, + cms: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.CustomerSystemAdminSignerCA("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.original.raw.signerCert) + string(data.future.raw.signerCert), + }, + }, { + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.TotalKASClientCABundle("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.original.raw.signerCert) + string(data.future.raw.signerCert), + }, + }}, + expected: &actions{ + cm: &corev1applyconfigurations.ConfigMapApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Name: ptr.To(manifests.CustomerSystemAdminSignerCA("").Name), + Namespace: ptr.To("crr-ns"), + }, + Data: map[string]string{ + "ca-bundle.crt": string(data.future.raw.signerCert), + }, + }, + }, + }, + { + name: "bundle only has new signers", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + Conditions: []metav1.Condition{{ + Type: certificatesv1alpha1.RootCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: 
metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `Signer certificate crr-ns/customer-system-admin-signer regenerated.`, + }, { + Type: certificatesv1alpha1.NewCertificatesTrustedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `New signer certificate crr-ns/customer-system-admin-signer trusted.`, + }}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signerCert, + corev1.TLSPrivateKeyKey: data.future.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminClientCertSecret("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signedCert, + corev1.TLSPrivateKeyKey: data.future.raw.clientKey, + }, + }}, + cms: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.CustomerSystemAdminSignerCA("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.future.raw.signerCert), + }, + }, { + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.TotalKASClientCABundle("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.original.raw.signerCert) + string(data.future.raw.signerCert), + }, + }}, + expected: &actions{ + 
crr: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Namespace: ptr.To("crr-ns"), + Name: ptr.To("crr-name"), + }, + Status: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatusApplyConfiguration{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{ + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Conditions: []metav1applyconfigurations.ConditionApplyConfiguration{{ + Type: ptr.To(certificatesv1alpha1.LeafCertificatesRegeneratedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`All leaf certificates are re-generated.`), + }, { + Type: ptr.To(certificatesv1alpha1.PreviousCertificatesRevokedType), + Status: ptr.To(metav1.ConditionFalse), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.WaitingForAvailableReason), + Message: ptr.To(`Previous signer certificate not yet revoked.`), + }, { + Type: ptr.To(certificatesv1alpha1.RootCertificatesRegeneratedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`Signer certificate crr-ns/customer-system-admin-signer regenerated.`), + }, { + Type: ptr.To(certificatesv1alpha1.NewCertificatesTrustedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`New signer certificate crr-ns/customer-system-admin-signer trusted.`), + }}, + }, + }, + }, + }, + { + name: "validating, previous still valid", + now: postRevocationClock.Now, + 
crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + Conditions: []metav1.Condition{{ + Type: certificatesv1alpha1.LeafCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `All leaf certificates are re-generated.`, + }, { + Type: certificatesv1alpha1.RootCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `Signer certificate crr-ns/customer-system-admin-signer regenerated.`, + }, { + Type: certificatesv1alpha1.NewCertificatesTrustedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `New signer certificate crr-ns/customer-system-admin-signer trusted.`, + }}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signerCert, + corev1.TLSPrivateKeyKey: data.future.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: 
data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminClientCertSecret("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signedCert, + corev1.TLSPrivateKeyKey: data.future.raw.clientKey, + }, + }}, + cms: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.CustomerSystemAdminSignerCA("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.future.raw.signerCert), + }, + }, { + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.TotalKASClientCABundle("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.original.raw.signerCert) + string(data.future.raw.signerCert), + }, + }}, + }, + { + name: "validating, previous invalid", + now: postRevocationClock.Now, + crrNamespace: "crr-ns", + crrName: "crr-name", + crr: &certificatesv1alpha1.CertificateRevocationRequest{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: "crr-name"}, + Spec: certificatesv1alpha1.CertificateRevocationRequestSpec{SignerClass: string(certificates.CustomerBreakGlassSigner)}, + Status: certificatesv1alpha1.CertificateRevocationRequestStatus{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw"}, + Conditions: []metav1.Condition{{ + Type: certificatesv1alpha1.LeafCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `All leaf certificates are re-generated.`, + }, { + Type: certificatesv1alpha1.RootCertificatesRegeneratedType, + Status: metav1.ConditionTrue, + LastTransitionTime: 
metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `Signer certificate crr-ns/customer-system-admin-signer regenerated.`, + }, { + Type: certificatesv1alpha1.NewCertificatesTrustedType, + Status: metav1.ConditionTrue, + LastTransitionTime: metav1.NewTime(postRevocationClock.Now()), + Reason: hypershiftv1beta1.AsExpectedReason, + Message: `New signer certificate crr-ns/customer-system-admin-signer trusted.`, + }}, + }, + }, + secrets: []*corev1.Secret{{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminSigner("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signerCert, + corev1.TLSPrivateKeyKey: data.future.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.original.raw.signerCert, + corev1.TLSPrivateKeyKey: data.original.raw.signerKey, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "crr-ns", + Name: manifests.CustomerSystemAdminClientCertSecret("").Name, + Annotations: map[string]string{certrotation.CertificateIssuer: "crr-ns_customer-break-glass-signer@1234"}, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: data.future.raw.signedCert, + corev1.TLSPrivateKeyKey: data.future.raw.clientKey, + }, + }}, + cms: []*corev1.ConfigMap{{ + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.CustomerSystemAdminSignerCA("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.future.raw.signerCert), + }, + }, { + ObjectMeta: metav1.ObjectMeta{Namespace: "crr-ns", Name: manifests.TotalKASClientCABundle("").Name}, + Data: map[string]string{ + "ca-bundle.crt": string(data.future.raw.signerCert), + }, + }}, + expected: &actions{ + crr: 
&certificatesv1alpha1applyconfigurations.CertificateRevocationRequestApplyConfiguration{ + ObjectMetaApplyConfiguration: &metav1applyconfigurations.ObjectMetaApplyConfiguration{ + Namespace: ptr.To("crr-ns"), + Name: ptr.To("crr-name"), + }, + Status: &certificatesv1alpha1applyconfigurations.CertificateRevocationRequestStatusApplyConfiguration{ + RevocationTimestamp: ptr.To(metav1.NewTime(revocationClock.Now())), + PreviousSigner: &corev1.LocalObjectReference{ + Name: "1pfcydcz358pa1glirkmc72sdkf5zw21uam4jbnj03pw", + }, + Conditions: []metav1applyconfigurations.ConditionApplyConfiguration{{ + Type: ptr.To(certificatesv1alpha1.PreviousCertificatesRevokedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`Previous signer certificate revoked.`), + }, { + Type: ptr.To(certificatesv1alpha1.LeafCertificatesRegeneratedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`All leaf certificates are re-generated.`), + }, { + Type: ptr.To(certificatesv1alpha1.RootCertificatesRegeneratedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`Signer certificate crr-ns/customer-system-admin-signer regenerated.`), + }, { + Type: ptr.To(certificatesv1alpha1.NewCertificatesTrustedType), + Status: ptr.To(metav1.ConditionTrue), + LastTransitionTime: ptr.To(metav1.NewTime(postRevocationClock.Now())), + Reason: ptr.To(hypershiftv1beta1.AsExpectedReason), + Message: ptr.To(`New signer certificate crr-ns/customer-system-admin-signer trusted.`), + }}, + }, + }, + }, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + c := &CertificateRevocationController{ + getCRR: 
func(namespace, name string) (*certificatesv1alpha1.CertificateRevocationRequest, error) { + if namespace == testCase.crr.Namespace && name == testCase.crr.Name { + return testCase.crr, nil + } + return nil, apierrors.NewNotFound(hypershiftv1beta1.SchemeGroupVersion.WithResource("certificaterevovcationrequest").GroupResource(), name) + }, + getSecret: func(namespace, name string) (*corev1.Secret, error) { + for _, secret := range testCase.secrets { + if secret.Namespace == namespace && secret.Name == name { + return secret, nil + } + } + return nil, apierrors.NewNotFound(corev1.SchemeGroupVersion.WithResource("secrets").GroupResource(), name) + }, + listSecrets: func(namespace string) ([]*corev1.Secret, error) { + return testCase.secrets, nil + }, + getConfigMap: func(namespace, name string) (*corev1.ConfigMap, error) { + for _, cm := range testCase.cms { + if namespace == cm.Namespace && name == cm.Name { + return cm, nil + } + } + return nil, apierrors.NewNotFound(corev1.SchemeGroupVersion.WithResource("configmaps").GroupResource(), name) + }, + skipKASConnections: true, + } + a, requeue, err := c.processCertificateRevocationRequest(context.Background(), testCase.crrNamespace, testCase.crrName, testCase.now) + if actual, expected := requeue, testCase.expectedRequeue; actual != expected { + t.Errorf("incorrect requeue: %v != %v", actual, expected) + } + if testCase.expectedErr && err == nil { + t.Errorf("expected an error but got none") + } else if !testCase.expectedErr && err != nil { + t.Errorf("expected no error but got: %v", err) + } + if diff := cmp.Diff(a, testCase.expected, compareActions()...); diff != "" { + t.Errorf("invalid actions: %v", diff) + } + }) + } +} + +func compareActions() []cmp.Option { + return []cmp.Option{ + cmp.AllowUnexported(actions{}), + cmpopts.IgnoreTypes( + &eventInfo{}, // these are just informative + metav1applyconfigurations.TypeMetaApplyConfiguration{}, // these are entirely set by generated code + ), + 
cmpopts.IgnoreFields(metav1applyconfigurations.OwnerReferenceApplyConfiguration{}, "UID"), + cmpopts.IgnoreFields(metav1applyconfigurations.ConditionApplyConfiguration{}, "ObservedGeneration"), + } +} diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.signed.tls.crt b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.signed.tls.crt new file mode 100644 index 0000000000..c551167eff --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.signed.tls.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNjCCAh6gAwIBAgIISnY5YTVCIr8wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UE +AxMLdGVzdC1zaWduZXIwIBcNMDcwMTAyMTUwNDA0WhgPMjEwNjEyMDkxNTA0MDVa +MEYxFzAVBgNVBAoTDnN5c3RlbTptYXN0ZXJzMSswKQYDVQQDEyJjdXN0b21lci1i +cmVhay1nbGFzcy10ZXN0LXdoYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAri+/qQgoXyXD9sP4e6MqywFMaNsoh67OEyV8LB9pvbyr9AN2Fmnl +2uF+g1HW9MkaiNyGKDDpJZLISeHzicFy1REGE8asQbc+nzY+0RY4Eyng3xoUvEKh +p23jdfBoWoEC8EaRJM7gGJqk0+WufCvurrmhrMy5/Sjc/g9XOqxxCScqQbuESeUH +BXPffKhNbq6f6p8x4Akdgtwu5hTUWf94jx+p5iS+enPXauwhgcRzXNiN6CEiNTtE +8Ziah7vTuOwO5LZNoKPYLKIaGLGu3WUcsj+XEo33wnzvRC5n+PFnVRWISqk1sn2K ++BsmuS+vXb7OWqlXYVGX14p+4n4IbsvjqwIDAQABo1YwVDAOBgNVHQ8BAf8EBAMC +B4AwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAW +gBRXTDqwkeB6jN7yHpqjMtFjgHZUQDANBgkqhkiG9w0BAQsFAAOCAQEAl6bpgs30 +6dh98aBbVYa+vzRuqQ7/AkzaQisoVsa5hgEf85WPwtVlcZO4uDLxFxxclEdwHEt4 +BWO/zJS2uWEwV1NW4ZXlkijmVSpsQo2bi56GM8O6n9lh12B5SjCx0MboycFWz0Tj +wfq7gf1mrOTbenArrcYy4oc362ClbszFlv3y7chVHZZ6KKSlKRIT4Ev+wU8H5G9u +xCCpN1UVb+zlz1DT/aMxRBSucYcK3WQk6ITy+TEv5DIFSPHjBhxCj7AtpSENRNGN +cKXqGI8GwbE2cZpf80LUOIP3pYYZ5THiMnSnthCuq/00a+BRWC/HYd1PUoBe4iQQ +S6PLE7gpJAj+lw== +-----END CERTIFICATE----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.tls.crt b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.tls.crt new file mode 
100644 index 0000000000..cc3ecfbe85 --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.tls.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDEzCCAfugAwIBAgIIBm+z5+g2lS8wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UE +AxMLdGVzdC1zaWduZXIwIBcNMDcwMTAyMTUwNDA0WhgPMjEwNjEyMDkxNTA0MDVa +MBYxFDASBgNVBAMTC3Rlc3Qtc2lnbmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAri+/qQgoXyXD9sP4e6MqywFMaNsoh67OEyV8LB9pvbyr9AN2Fmnl +2uF+g1HW9MkaiNyGKDDpJZLISeHzicFy1REGE8asQbc+nzY+0RY4Eyng3xoUvEKh +p23jdfBoWoEC8EaRJM7gGJqk0+WufCvurrmhrMy5/Sjc/g9XOqxxCScqQbuESeUH +BXPffKhNbq6f6p8x4Akdgtwu5hTUWf94jx+p5iS+enPXauwhgcRzXNiN6CEiNTtE +8Ziah7vTuOwO5LZNoKPYLKIaGLGu3WUcsj+XEo33wnzvRC5n+PFnVRWISqk1sn2K ++BsmuS+vXb7OWqlXYVGX14p+4n4IbsvjqwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMC +AqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU+lcRPR32xFgMgXwEg+0muap1 +NvwwHwYDVR0jBBgwFoAU+lcRPR32xFgMgXwEg+0muap1NvwwDQYJKoZIhvcNAQEL +BQADggEBADO7BnedyDDN1Kn/kALe33js/8lgnKM6hCRbMXrkZiwH6PQwFDrA5YOQ +I7fXyi+4SofuYg9OWD/lMQ9fgKhxlQBeZ5E/QVwHhLncZKC15Nk4EmgQzOVsAYLT +kYLLY6buu8X5Pzxy0NZ0PxFieJHQ05RzZWoGU3kPapNYI8jRNxw/L0kx/H0etelQ +OvMHEFSQwotcOSxfOqiuxWg97w+i4viRvNGcaoEtIljkZBi0NgU8Hv3HdCusWXfW +hMPf8l/AbLynFMBFpHN4Cik1XcSr1CDGlIiS8NONMJzfJz3VtyMmaSIR0kvAxvfh +PimHEgHDoHCh1o7NEcnf9tfM/jpVwrE= +-----END CERTIFICATE----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.tls.key b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.tls.key new file mode 100644 index 0000000000..e07f6b2697 --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future-client.tls.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAri+/qQgoXyXD9sP4e6MqywFMaNsoh67OEyV8LB9pvbyr9AN2 +Fmnl2uF+g1HW9MkaiNyGKDDpJZLISeHzicFy1REGE8asQbc+nzY+0RY4Eyng3xoU +vEKhp23jdfBoWoEC8EaRJM7gGJqk0+WufCvurrmhrMy5/Sjc/g9XOqxxCScqQbuE +SeUHBXPffKhNbq6f6p8x4Akdgtwu5hTUWf94jx+p5iS+enPXauwhgcRzXNiN6CEi 
+NTtE8Ziah7vTuOwO5LZNoKPYLKIaGLGu3WUcsj+XEo33wnzvRC5n+PFnVRWISqk1 +sn2K+BsmuS+vXb7OWqlXYVGX14p+4n4IbsvjqwIDAQABAoIBAFcVfi/G6VAwdFmh +vlAp0lIt8wKVVx0GsvZ1jjANAHOgqSNUu6wXA5i7leGXf+1fwYldHyFm2pkzWjk9 +4uEjOwL1AOHQOPyd3YwBtcQY5K4ICOnhgy5f7avkT4z+RV3CFDMGRLhvSTBj2DYs +JWDlIe5u1jqrG+1Kibnm94hZACY/gEWx0QcHrIeqvwryzuB5Wb5CnDf8qYaitkgO +rqq6YG6E5h3pAn/MlT1CvyutVG1+hKz5Ya10vT7C87FFEIOaV8M9xkzbXA00HizO +5Rt0Y9w/qkVbXnpS8kgQLX4+KrRSaYx9JvzxCCoJ9w3i8uuo5jAda7KtJq/iDeAM +mt1DpakCgYEA0o/UOBk2zMvD5ro3HItna15+97XoBwqtUP4P71WomRhyX4o310Eh +eEqtMDva6+yphsSEzh8OSWXU3mMEuZGoSP4ldbfKcYUHqRobocaIdjMEeXKsV9sY +emw3QPjTioG4slxCP6kFzka7BDTTQG5pfrOo4UKZ7iVVa95G2NQ0ye0CgYEA08Zr +mdjOw947Rprgj9jwghS4uMfnBhU0EjG0sbBQ0QCL3E26UTgwLj9ps2mdXXGNKwF3 +aesDIymxCswdgvxjDecVx/E2RwO8QweoH66ZNklSzSPzcKbmGKvJMs+NZ4ui69Uc +91lPkYo49WmZzMJQndjOaVI5LAExcWo4nc9vUPcCgYB7jqzQcnNGv7doGBOo62C3 +j47f2t2Z7DkB0uQU5GX32HGdAKV96ZkzVlbEfAsd8BUWoRDxRyYCCgBcsywdnIxs +sL3Ykw33iUGSiGB4kOCYw503iwP41fdKN2BA/wJbP33bI+o4Iv3mKnkpobnpECFV +mSVbcdKT/VJf5uIZ8IQ9jQKBgFBbplDGeA7SsONluXhb9Ucm3cEf+YXRXeTZf5s9 +MC1ea7O4us4+5+lknpM5rEDc6Zg8AjfquVIKa+eQ9FHTuzJ3UUiBOvtPa4xzx1Pe +SLzUrdqxnZpNelo6NSpWn21/Ct86CrfA5/Rt4pcc7wNHaJe8wPYuAQu1mDFVAQ7A +u5iZAoGBAJU9XucwDbCbKs0WZh5EiAlUgwiVsonP8jXRGeF8CbpysDlqBG5OEJ00 +Gwzpa+eJ+grOzmUazxJGmHjxRjDL6OsImf/nH9LFUqy6bJJRxHAxc+CmddeEaZYS +elh4cX6zXJ7Yb7nBXxqSrO6uogX0rZHqO6QEQaxwGk95Tibio9b9 +-----END RSA PRIVATE KEY----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/future.tls.crt b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future.tls.crt new file mode 100644 index 0000000000..efdc3eaa21 --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future.tls.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDEzCCAfugAwIBAgIIbI1K8hW8uzowDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UE +AxMLdGVzdC1zaWduZXIwIBcNMDcwMTAyMTUwNDA0WhgPMjEwNjEyMDkxNTA0MDVa +MBYxFDASBgNVBAMTC3Rlc3Qtc2lnbmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A 
+MIIBCgKCAQEAnUW02tCNbzLyX/2/rB/1u42OaTq0NrHFpZyaWzCHaiw8ko/m1e7Z +qC40Xh9Dy5Puuo1qr4L4HsZf6pjLuW6OK9s9TH9YSvstVw0fJAXMWT+VqG7GJQD5 +UeBbHd38kGt82ld3DL5KgLZPl06ScyIxWck4ZYFaQaVc1aW+cSn5YzaTgOpP8oDz +oFlAOxKJbqwPxwl1yrrx07E9hwM65rQNIeALNJ3eguh1M0B+6ucB84gvJKcA0E5W +5P6rtPq+QWRWZA7+aXkq7qGMFQY9GBrKYP5a2HUUPbQr7npGBWIOAp35p16DpyaE +DlkxFRYhgV8I5WQlm4K6DcN1ijLvbbmqOwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMC +AqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUV0w6sJHgeoze8h6aozLRY4B2 +VEAwHwYDVR0jBBgwFoAUV0w6sJHgeoze8h6aozLRY4B2VEAwDQYJKoZIhvcNAQEL +BQADggEBACymBw7/cD7QZs3izxR/fNnEHx2QE72pvgj45WeRWYDoszj6pNckj46C +OBngQth2xbE1TBm9odLDnvvejCn1kImDmwyCyaLDswMHBnqg+j29wbKrwD4qyrDc +YTkkKUhTZ6gd2h6/jCki297sFbYn5ICJVYlgWluHjWyMpQr4SKDEZvOZnOep4MJM +vWm28IiwDxmB26HEKMfrdQta8Xo/8cpV7hyW3Dnl/1kkpGJyRl3TqUdsYNKHbBSf +3I/8FqrCE/pCycK52254lfH8jAO+4HXmiovgtvupeMXoOR/7wyX3v6zdIl3xc5mD +1afp6Tw6Jz2Mm0StKZ1Mh8wue00mnVk= +-----END CERTIFICATE----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/future.tls.key b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future.tls.key new file mode 100644 index 0000000000..7bd165f118 --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/future.tls.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAnUW02tCNbzLyX/2/rB/1u42OaTq0NrHFpZyaWzCHaiw8ko/m +1e7ZqC40Xh9Dy5Puuo1qr4L4HsZf6pjLuW6OK9s9TH9YSvstVw0fJAXMWT+VqG7G +JQD5UeBbHd38kGt82ld3DL5KgLZPl06ScyIxWck4ZYFaQaVc1aW+cSn5YzaTgOpP +8oDzoFlAOxKJbqwPxwl1yrrx07E9hwM65rQNIeALNJ3eguh1M0B+6ucB84gvJKcA +0E5W5P6rtPq+QWRWZA7+aXkq7qGMFQY9GBrKYP5a2HUUPbQr7npGBWIOAp35p16D +pyaEDlkxFRYhgV8I5WQlm4K6DcN1ijLvbbmqOwIDAQABAoIBACQaED36ibzw8Ppg +AVO9smbvQ7WcKCo1/KzbmgM8zJjutqjeJ5sMTSJyGMtSWfmtZ6ujMs4/Pj2yQ+pS +UNGsMy6WESgyQ05TAkFtXayjOBl0oyIa65kq9BB7c+8TOhrc0bg9Q7LFK80IDJUr +ECslujZmHnAOlW1kTD8u4NyRShIf5PDTU9PDuDuhYbtaafuapOlYQyEaxUxKVUJd +39pUJQfX4oPoIRp6VxE1BWm+DICjUEDsWAtVd3oNRcHq5hF0+V3NPsSbbqrfs/7V 
+rWna/izCIJc8+2yDd/V4sFxnI2FJ7XvSFY+4zDpvuvaXLq4mXLUjCSbYjC2vDY8I +J0XKAOECgYEAxNCkQSJLED0jSvnct1nRGG0mWcF0RKoT0z1WbonqI7H9Bmz4ij69 +A7VGJupo7tDnIA44KwJeOgT0bFyCKvIJF97JtCv+slFv9GpE9qpJm9nDXJp/OLsD +uVRgOd4eopmhJDr5QsOOQ/g8XvcRoPxCfnFT0U7KPbSs9feCLy1952UCgYEAzJD1 +Ub0XZO4bqTYc4qfNI8yPYQhtTC7cwlrGSSA1iWUMbmxkGpG1Hdhv/K1S4c8dMq7B +uW0Nibj08hpzZw91MqEzpqTERKPBgV2stvOhQxa15jDuzR93ZkQN1RGO+kaGAnQ7 +B0YMK1bG5XoTYxekWpxdI1PWuV26wcRIOpoyQR8CgYEAt/MdofbwW3QY+WmA/ilH +QeI6Vud1yPuBXgzVLKlgGg6wI4JT5bnvpXiW4aZzfsnnS1Ge86vZ77ZT0LfBvWvM +TfAfa3M3MOjmj3WHkVflRnIIoxOPVrGMMHqJGWzeCzE1qAwqjlkCLcrkegnIA0Pi +zhUTtuxCH9wvUBEOLxQAufUCgYEAjoAoNCFp64gmwrAMXSORNm/oLSrmoFxAsi7z +07rZMHWwvDdLYGrB5SGBmV3Pz7csWsL79kRuWtL55rDgVRmihXtf9KTwh/Qe9xQf +HW8Hlil62viZUVCrJxUfIZ7Sn6uC7LC08fMsxP/1G6P3X173wZsNEm/zszsEvrgR +rKj/evcCgYEAtFsS+QdIksUiTglVA4XxUT9npIhGoBDoHiyaDDVloqndR/RGL5aC +BlQdknG9D+ldfRNw35JKemHGUqmI3bLGR0ByFVkrCx2HPXMVUe8mfoWC3dYweDda +WLZkYQgzPRcnZy4xXvdLlmglwvSJwALfXLt+iCSC+mVedTotlfkCO34= +-----END RSA PRIVATE KEY----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.signed.tls.crt b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.signed.tls.crt new file mode 100644 index 0000000000..9b09d448f7 --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.signed.tls.crt @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDNjCCAh6gAwIBAgIIEEOUMTg7xHQwDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UE +AxMLdGVzdC1zaWduZXIwIBcNMDUwMTAyMTUwNDA0WhgPMjEwNDEyMDkxNTA0MDVa +MEYxFzAVBgNVBAoTDnN5c3RlbTptYXN0ZXJzMSswKQYDVQQDEyJjdXN0b21lci1i +cmVhay1nbGFzcy10ZXN0LXdoYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAtbWHE5QHM606dtNhgJGUMr3NQrHufpKdsc0d9izTdUMK8xLde6qL +HBPT+7cY+451M3Nrg/ZGH7GHECwdq4Cgkks92l/3kxFJv22/QyAecc0fg6C752rS +hZoaN1ovt3ggADaVLbJnm9k4P5rk7JAPMxshdb94rRSWn4Vo3AnhQp3o/a4VSTKP +JIJB8q++U41ZyO+QmxnhOwhGI6FG7B1YWTlbwirfChG+F+dpxPBf6g8iSqOFsmcQ 
+idKxDtrROFEU0nNcHKxZcHL2raT/I6svBlLOiqmZuFC0Mg8E9MDeHt7+63+Aavma +qms3ugqFfYCojX8jcvFmzGmrKXgC8QAZkQIDAQABo1YwVDAOBgNVHQ8BAf8EBAMC +B4AwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAW +gBTw0a2SVXYpI0HwrLV3jhyipDpMRjANBgkqhkiG9w0BAQsFAAOCAQEAXc0WhBvF +D42zgiQxVVQcaDH7rvmnHXMvMThX64kPPXVit1OXBQId2T/TE57Mdez/0mnkKV2K +56l/It61BGQNO0i9Q81GRKMAm7fkG+bK86D2KWPZQUWjerTjzt5VSPQ+JoQYldk8 +hRpLVceoZ2iAHwM3/8jznk1IlQolF1HM8JY9EzM0iQDYMt97Dkv/LDaJgsYMIYFN +bUoCexnJ7YA3b4Bnmve6BYvloidTsNEivN2o/rhUWD0dvaGcheVYq2VN+3aJlfH7 ++YgIFHQCK24Q3NIQ0FoWk/MULDkM3a1kewI3e6JN11B4AejAfu+bTKj0u+niKpn9 +3CScokTO2KdcYw== +-----END CERTIFICATE----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.tls.crt b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.tls.crt new file mode 100644 index 0000000000..6fd50e37ed --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.tls.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDEzCCAfugAwIBAgIIPD33tW+U5lQwDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UE +AxMLdGVzdC1zaWduZXIwIBcNMDUwMTAyMTUwNDA0WhgPMjEwNDEyMDkxNTA0MDVa +MBYxFDASBgNVBAMTC3Rlc3Qtc2lnbmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAtbWHE5QHM606dtNhgJGUMr3NQrHufpKdsc0d9izTdUMK8xLde6qL +HBPT+7cY+451M3Nrg/ZGH7GHECwdq4Cgkks92l/3kxFJv22/QyAecc0fg6C752rS +hZoaN1ovt3ggADaVLbJnm9k4P5rk7JAPMxshdb94rRSWn4Vo3AnhQp3o/a4VSTKP +JIJB8q++U41ZyO+QmxnhOwhGI6FG7B1YWTlbwirfChG+F+dpxPBf6g8iSqOFsmcQ +idKxDtrROFEU0nNcHKxZcHL2raT/I6svBlLOiqmZuFC0Mg8E9MDeHt7+63+Aavma +qms3ugqFfYCojX8jcvFmzGmrKXgC8QAZkQIDAQABo2MwYTAOBgNVHQ8BAf8EBAMC +AqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUNW5NvTDe9rsfIWb3YTmv/K/B +MIkwHwYDVR0jBBgwFoAUNW5NvTDe9rsfIWb3YTmv/K/BMIkwDQYJKoZIhvcNAQEL +BQADggEBAJWVOzV5TBHNAdOAJM7h1F9Jjmp/p4qaORcFIcjPiuHI52/aRWf8DCVi +tQhR1jhylVx0/89L7kTIJ8bM27ZqY4ilxm2v69P2qCmwnI/oVIrcOt7ZJlA5guXU +NPIu3x36NKdGEJ4W9npIN3lXxwDy9oarf9KcLk+p51RrZFBzxtIkWTVamDAlX9A0 
+iQ4+PFiiKcs0hkKBAgSFI0nT7RO5NZyhpaFqWp5LlNnevDJVzPWpPRkgxxQu0/MP ++gW93cwzOsKtzyhBrStHvwMVPz1cya+xKdEuD7zFTJ60d08hZ1dPzHki23xweutc +cYd73BUzkRhW+dueg6KqZ11i91R84nQ= +-----END CERTIFICATE----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.tls.key b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.tls.key new file mode 100644 index 0000000000..a8b2eb1d36 --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original-client.tls.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAtbWHE5QHM606dtNhgJGUMr3NQrHufpKdsc0d9izTdUMK8xLd +e6qLHBPT+7cY+451M3Nrg/ZGH7GHECwdq4Cgkks92l/3kxFJv22/QyAecc0fg6C7 +52rShZoaN1ovt3ggADaVLbJnm9k4P5rk7JAPMxshdb94rRSWn4Vo3AnhQp3o/a4V +STKPJIJB8q++U41ZyO+QmxnhOwhGI6FG7B1YWTlbwirfChG+F+dpxPBf6g8iSqOF +smcQidKxDtrROFEU0nNcHKxZcHL2raT/I6svBlLOiqmZuFC0Mg8E9MDeHt7+63+A +avmaqms3ugqFfYCojX8jcvFmzGmrKXgC8QAZkQIDAQABAoIBAQCI/r1CE36ChX3o +jGGcTyWOQ+7287M9lkhx/pUyPoWGiO8+Z+C4FdIfbwuJYXfiYHsYOVK8APbJeky7 +8qbD4Iwu/684btX2+TfCrXlfoF2TqvSxhoNka/MgaxiM1t/W0Sg/QOejtjfLFjAE +NEGX4Ny0ySWm4p6Wz0joJ+rwyjocwC/jottidJdjuObS5W79onThRJ7JTwxzdvJM +ikt/DI4YoA17CzT9cJW+SOZCalU+PcX3Ec2QkU7nhyfSy/yN+IrBGujDBmLGA9X6 +lrs2KqgxB4YicLDF4tKnxdUlenO+iMuUTLJwXx61Af7twRhNZ3Z2yfofOYr3wj8+ +Pi9XfZ5JAoGBANqh0UzahcHrW19G3dP/T8jOAaNfNCuExaxwMXUC2CNPNTXUpAnB +gdzZ+aQOr/RDwRP8rsgRD2s1PtrVPyqfsR74RMhZDaEr7xaJaICJDIjfnrhmxaLX +E/s00sQ2OhzO1qxKMFatV3lDHXcPIo011gKhhglq72UoOtoh5eESHOcPAoGBANTE +J6UkWLZss+R3AzLr78t3X19imevfdrQAxxnpZPjUBIKiKIahY8BLtREvFUNuQAY3 +svUuM94cfx2ZwrL9XHeUVOMhD6DQ09oD5v8Xbxrk6jPep+X52YWRiaeCr7NULLRB +/t9QOGiyAHCkG71OP3N6FuDN+HDnmpmG1ThGi/VfAoGBAM+R5FGwAl/a4NQzRvY4 +JnqCQ4HlKHXMx5PwrLPn8GaNk/o4mUj95BpXBLFilGE3Vn9wXkxqDhZ95eADp8YC ++TlrAnqoOc10Fbly2bl25GSq0llGkYsJ4dmVDCnnRgMFyUCn6v7P8gWZ18aqouYo +X7f9vHECiqiiqkVg+4xVEwW3AoGBAJbVKDXOeoV4sl74b/AdirV3PslkITIyDPi2 +xG7+InKz+y6QvqISr6CXCxnPgwd2lTTTL67YvjRrh0H4yyoQqwiqwzLxMR8Ua9tW 
+gN++QSmTtuRmqChE44vpDOkPoHdE6Rww3Pp66EJwTheMf43IdvrqRmXAHqwLxHGq +QGXQvU+JAoGASU9ktNVwAuFPljY9NWTr8qsTKFlSqHtulcTahaVmZtcYtnL1zI0z +9kwyMIKYwhghjyip+8I9CXw35tdajvRlMaEqiWn9W6Ok9Ayyv8lj/mtJ9rupBQ8V +FZ9X62sv5lhK6l40GlgIfpr2hu7Z0sOl7GSnAOkOblBXcxL4g17V5HA= +-----END RSA PRIVATE KEY----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/original.tls.crt b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original.tls.crt new file mode 100644 index 0000000000..338a67c1d7 --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original.tls.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDEzCCAfugAwIBAgIIUucUJtWoYQ8wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UE +AxMLdGVzdC1zaWduZXIwIBcNMDUwMTAyMTUwNDA0WhgPMjEwNDEyMDkxNTA0MDVa +MBYxFDASBgNVBAMTC3Rlc3Qtc2lnbmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEA17Xz3y9QnXETNqyttXp5Awr9HUJmBiPokUU4wnNVelsXtxKovB+W +t9U2eGEWhmuaYAuv/+wdcu8dhDM1wq0p8vx042PtKtTMK9ArhIpuO2G4rT8TTCHU +A68rZsbrhhug/fFKgoIieTtBy+Yg8yJX70LEmtOHreMbcZd1z+GsX04O6vRIuXtZ +lq1Tks4ZgFxI7ZjZyHLOPSwJhuaVVOcziAIQTdRy50ec7grIR6y63Z6SzorbV9jS +2Osv6FKhc/P/yuDXYHC25hHGyAEOF6/mVB7tQoo99Xq7d5U2dqjOagWSHtLxdNBr +NmpZYPTkDb+o3BQnLcDjWY9W9XKCQBGjWwIDAQABo2MwYTAOBgNVHQ8BAf8EBAMC +AqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8NGtklV2KSNB8Ky1d44coqQ6 +TEYwHwYDVR0jBBgwFoAU8NGtklV2KSNB8Ky1d44coqQ6TEYwDQYJKoZIhvcNAQEL +BQADggEBAIm55Vd0HR4ejIIUihy7ZIrxLl17cRSRWZ/fEp68VbURWRT5oXSeAjwk +5FTSPcGlk5Sz3uLmANi60xmJ/3CY6mR7/G7sJL1yICdJ0QpoZmAiQRezpI6RhoaT +hzJixmxqYGNqV8Ig5eCPL+Xw7lfXenrWEtAOB7tyWFrjCMsso7LDY+J5fVdmIpPD +KFq1cQBED82pjGRhjehQsA76mmG7cUBYkZMs7pPB3CNny0yKaSFTHYAUDWjoyonr +5roSgQHxJGTaZitzpsikWJHkfFSu4FWS03zmvFXv2hefhkbhqUcAH3CaK/6icc2l +Ln01QSw5Lk1eNXBtguyDu4qYHT5klAg= +-----END CERTIFICATE----- diff --git a/control-plane-pki-operator/certificaterevocationcontroller/testdata/original.tls.key b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original.tls.key new file mode 100644 
index 0000000000..ac5e98f072 --- /dev/null +++ b/control-plane-pki-operator/certificaterevocationcontroller/testdata/original.tls.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA17Xz3y9QnXETNqyttXp5Awr9HUJmBiPokUU4wnNVelsXtxKo +vB+Wt9U2eGEWhmuaYAuv/+wdcu8dhDM1wq0p8vx042PtKtTMK9ArhIpuO2G4rT8T +TCHUA68rZsbrhhug/fFKgoIieTtBy+Yg8yJX70LEmtOHreMbcZd1z+GsX04O6vRI +uXtZlq1Tks4ZgFxI7ZjZyHLOPSwJhuaVVOcziAIQTdRy50ec7grIR6y63Z6Szorb +V9jS2Osv6FKhc/P/yuDXYHC25hHGyAEOF6/mVB7tQoo99Xq7d5U2dqjOagWSHtLx +dNBrNmpZYPTkDb+o3BQnLcDjWY9W9XKCQBGjWwIDAQABAoIBADSOwLzAaoPx6RyJ +NknhbVqwcruOUg1s8l1y4EGAmHMXfs+8XCB6Ed74tCzgevyFezeroVZZ0VMPr8Fm +ONMWHgJ2QISm9EJbVuPV9MR2diVByh1sIOeL1nyPUaPZE8m5MaCuCdmCm6OuLHnh +uGWFGKfTPNP8djKIA0fJ/4qHEdimWbVAHNziw/Sn6DKsfIwTDPQzuKwApHyI/UfC +pQQFX9UMWJW3p3rPjgdL7cU6ck+QzddNiruoogwbwy/lhobjplwUScQCKz8Iq1vI +OtGjXRIdi3NLSkmGswpUJvJ/uzQnKl9VvUHilxy8lMKX7C5LWoNryzAIG8M1D+d9 +qOmXngECgYEA52nHAlNk15H2j6sNo1ATFhf45L52nWUBadNxC82UbMUmSYMSEXdw +JN0vGi7Jp8WRgFCl7pQAwOD8OxM+gZ0IllFO0TZngZ1yU6CCQHj2LnmD5lbh8YWe +TADYJovZDwTK30fr/16d9K40GUrx5SoIQhmc0z0srEJD711b5GQig+MCgYEA7qEV +YqnQNbcOiwvYVHK6sGjaPFqjLEW873bsWryC8GfbjtbPw6RQZsUG8xSmrFQGccje +vGSfrJHgWK9872qCB33CqvgUw19jxa4bbUT5nFG2xPu1rRCtzhnD97LI285clPrP +6gM3tJ54JnKqhLCMrdVY0zy14sX3eZry9HG6rCkCgYAVYWGApoHPpO250lz9NL2+ +sdJOGAbPffCGfYGZTJIlBoYGDrURpg5XaZQbgC0jcg6CY3EhPM1hBKhpMNr6kK6l +bLeyfqtLf2d7sH24RTTBkHqOQoK3lNfOP9m4nf0c9R1lSbjVLEG9xIaNu63jtWFz +8ffaUHGbLLgoGmEOFe5WbwKBgQDPpKWaM8+0XfSus4DrguXGSYbVC71+8bQE34Ot +NOnvTUA02+DwCZPYyUtRy794pqjw6+w9HIYAwPLp2NIq9o/s+tagtLxEgUWtJuJA +w8s75bLXV3vv+1pxw+PNLuousjPHgzPWGjSn21kLg62zRnkzbjkbnnFawg6k51rP +sALSeQKBgH0ng+rSUYL2nA9AKC39aOkFTZX/hfF9c7vRutIcRGzhL1v82AK3/gmc +wPKIMl5PVbMDn+L6afhDCB3DWTmqYvwFrFfpIQY81U3j3pP9TLo9VDNgHO4Tt5zZ +8lQvqrmtcIkttKMYVr+s1bjtuleLbilVI+YhSdEt8GER/TssMu1U +-----END RSA PRIVATE KEY----- diff --git a/control-plane-pki-operator/certificates/conditions.go b/control-plane-pki-operator/certificates/conditions.go new file mode 100644 
index 0000000000..6fa22717ec --- /dev/null +++ b/control-plane-pki-operator/certificates/conditions.go @@ -0,0 +1,38 @@ +package certificates + +import ( + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" +) + +// The following code comes from core Kube, which can't be imported, unfortunately. These methods are copied from: +// https://github.com/kubernetes/kubernetes/blob/ec5096fa869b801d6eb1bf019819287ca61edc4d/pkg/controller/certificates/certificate_controller_utils.go#L24-L51 + +// IsCertificateRequestApproved returns true if a certificate request has the +// "Approved" condition and no "Denied" conditions; false otherwise. +func IsCertificateRequestApproved(csr *certificatesv1.CertificateSigningRequest) bool { + approved, denied := GetCertApprovalCondition(&csr.Status) + return approved && !denied +} + +// HasTrueCondition returns true if the csr contains a condition of the specified type with a status that is set to True or is empty +func HasTrueCondition(csr *certificatesv1.CertificateSigningRequest, conditionType certificatesv1.RequestConditionType) bool { + for _, c := range csr.Status.Conditions { + if c.Type == conditionType && (len(c.Status) == 0 || c.Status == corev1.ConditionTrue) { + return true + } + } + return false +} + +func GetCertApprovalCondition(status *certificatesv1.CertificateSigningRequestStatus) (approved bool, denied bool) { + for _, c := range status.Conditions { + if c.Type == certificatesv1.CertificateApproved { + approved = true + } + if c.Type == certificatesv1.CertificateDenied { + denied = true + } + } + return +} diff --git a/control-plane-pki-operator/certificates/helpers.go b/control-plane-pki-operator/certificates/helpers.go new file mode 100644 index 0000000000..3d4ea13ee9 --- /dev/null +++ b/control-plane-pki-operator/certificates/helpers.go @@ -0,0 +1,24 @@ +package certificates + +import ( + "crypto/x509" + "encoding/pem" + "errors" +) + +// The following code comes from core Kube, which can't be 
imported, unfortunately. These methods are copied from: +// https://github.com/kubernetes/kubernetes/blob/ec5096fa869b801d6eb1bf019819287ca61edc4d/pkg/apis/certificates/v1/helpers.go#L25-L37 + +// ParseCSR decodes a PEM encoded CSR +func ParseCSR(pemBytes []byte) (*x509.CertificateRequest, error) { + // extract PEM from request object + block, _ := pem.Decode(pemBytes) + if block == nil || block.Type != "CERTIFICATE REQUEST" { + return nil, errors.New("PEM block type must be CERTIFICATE REQUEST") + } + csr, err := x509.ParseCertificateRequest(block.Bytes) + if err != nil { + return nil, err + } + return csr, nil +} diff --git a/control-plane-pki-operator/certificates/usages.go b/control-plane-pki-operator/certificates/usages.go new file mode 100644 index 0000000000..cabbbe284f --- /dev/null +++ b/control-plane-pki-operator/certificates/usages.go @@ -0,0 +1,68 @@ +package certificates + +import ( + "crypto/x509" + "fmt" + "sort" + + certificatesv1 "k8s.io/api/certificates/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +var keyUsageDict = map[certificatesv1.KeyUsage]x509.KeyUsage{ + certificatesv1.UsageSigning: x509.KeyUsageDigitalSignature, + certificatesv1.UsageDigitalSignature: x509.KeyUsageDigitalSignature, + certificatesv1.UsageContentCommitment: x509.KeyUsageContentCommitment, + certificatesv1.UsageKeyEncipherment: x509.KeyUsageKeyEncipherment, + certificatesv1.UsageKeyAgreement: x509.KeyUsageKeyAgreement, + certificatesv1.UsageDataEncipherment: x509.KeyUsageDataEncipherment, + certificatesv1.UsageCertSign: x509.KeyUsageCertSign, + certificatesv1.UsageCRLSign: x509.KeyUsageCRLSign, + certificatesv1.UsageEncipherOnly: x509.KeyUsageEncipherOnly, + certificatesv1.UsageDecipherOnly: x509.KeyUsageDecipherOnly, +} + +var extKeyUsageDict = map[certificatesv1.KeyUsage]x509.ExtKeyUsage{ + certificatesv1.UsageAny: x509.ExtKeyUsageAny, + certificatesv1.UsageServerAuth: x509.ExtKeyUsageServerAuth, + certificatesv1.UsageClientAuth: x509.ExtKeyUsageClientAuth, + 
certificatesv1.UsageCodeSigning: x509.ExtKeyUsageCodeSigning, + certificatesv1.UsageEmailProtection: x509.ExtKeyUsageEmailProtection, + certificatesv1.UsageSMIME: x509.ExtKeyUsageEmailProtection, + certificatesv1.UsageIPsecEndSystem: x509.ExtKeyUsageIPSECEndSystem, + certificatesv1.UsageIPsecTunnel: x509.ExtKeyUsageIPSECTunnel, + certificatesv1.UsageIPsecUser: x509.ExtKeyUsageIPSECUser, + certificatesv1.UsageTimestamping: x509.ExtKeyUsageTimeStamping, + certificatesv1.UsageOCSPSigning: x509.ExtKeyUsageOCSPSigning, + certificatesv1.UsageMicrosoftSGC: x509.ExtKeyUsageMicrosoftServerGatedCrypto, + certificatesv1.UsageNetscapeSGC: x509.ExtKeyUsageNetscapeServerGatedCrypto, +} + +// KeyUsagesFromStrings will translate a slice of usage strings from the +// certificates API ("pkg/apis/certificates".KeyUsage) to x509.KeyUsage and +// x509.ExtKeyUsage types. +func KeyUsagesFromStrings(usages []certificatesv1.KeyUsage) (x509.KeyUsage, []x509.ExtKeyUsage, error) { + var keyUsage x509.KeyUsage + var unrecognized []certificatesv1.KeyUsage + extKeyUsageSet := sets.New[x509.ExtKeyUsage]() + for _, usage := range usages { + if val, ok := keyUsageDict[usage]; ok { + keyUsage |= val + } else if val, ok := extKeyUsageDict[usage]; ok { + extKeyUsageSet.Insert(val) + } else { + unrecognized = append(unrecognized, usage) + } + } + + extKeyUsages := extKeyUsageSet.UnsortedList() + sort.Slice(extKeyUsages, func(i, j int) bool { + return extKeyUsages[i] < extKeyUsages[j] + }) + + if len(unrecognized) > 0 { + return 0, nil, fmt.Errorf("unrecognized usage values: %q", unrecognized) + } + + return keyUsage, extKeyUsages, nil +} diff --git a/control-plane-pki-operator/certificates/usages_test.go b/control-plane-pki-operator/certificates/usages_test.go new file mode 100644 index 0000000000..c3a46c2291 --- /dev/null +++ b/control-plane-pki-operator/certificates/usages_test.go @@ -0,0 +1,81 @@ +package certificates + +import ( + "crypto/x509" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" 
+ + certificatesv1 "k8s.io/api/certificates/v1" +) + +func TestKeyUsagesFromStrings(t *testing.T) { + testcases := []struct { + usages []certificatesv1.KeyUsage + expectedKeyUsage x509.KeyUsage + expectedExtKeyUsage []x509.ExtKeyUsage + expectErr bool + }{ + { + usages: []certificatesv1.KeyUsage{"signing"}, + expectedKeyUsage: x509.KeyUsageDigitalSignature, + expectedExtKeyUsage: []x509.ExtKeyUsage{}, + expectErr: false, + }, + { + usages: []certificatesv1.KeyUsage{"client auth"}, + expectedKeyUsage: 0, + expectedExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + expectErr: false, + }, + { + usages: []certificatesv1.KeyUsage{"client auth", "client auth"}, + expectedKeyUsage: 0, + expectedExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + expectErr: false, + }, + { + usages: []certificatesv1.KeyUsage{"cert sign", "encipher only"}, + expectedKeyUsage: x509.KeyUsageCertSign | x509.KeyUsageEncipherOnly, + expectedExtKeyUsage: []x509.ExtKeyUsage{}, + expectErr: false, + }, + { + usages: []certificatesv1.KeyUsage{"ocsp signing", "crl sign", "s/mime", "content commitment"}, + expectedKeyUsage: x509.KeyUsageCRLSign | x509.KeyUsageContentCommitment, + expectedExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageEmailProtection, x509.ExtKeyUsageOCSPSigning}, + expectErr: false, + }, + { + usages: []certificatesv1.KeyUsage{"unsupported string"}, + expectedKeyUsage: 0, + expectedExtKeyUsage: nil, + expectErr: true, + }, + } + + for _, tc := range testcases { + t.Run(fmt.Sprint(tc.usages), func(t *testing.T) { + ku, eku, err := KeyUsagesFromStrings(tc.usages) + + if tc.expectErr { + if err == nil { + t.Errorf("did not return an error, but expected one") + } + return + } + + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + if keyUsageDiff := cmp.Diff(ku, tc.expectedKeyUsage); keyUsageDiff != "" { + t.Errorf("got incorrect key usage: %v", keyUsageDiff) + } + if extKeyUsageDiff := cmp.Diff(eku, tc.expectedExtKeyUsage); extKeyUsageDiff != "" { + 
t.Errorf("got incorrect ext key usage: %v", extKeyUsageDiff) + } + }) + } +} diff --git a/control-plane-pki-operator/certificates/validation.go b/control-plane-pki-operator/certificates/validation.go new file mode 100644 index 0000000000..71a2d0a509 --- /dev/null +++ b/control-plane-pki-operator/certificates/validation.go @@ -0,0 +1,109 @@ +package certificates + +import ( + "crypto/x509" + "errors" + "fmt" + "strings" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/hypershift-operator/controllers/manifests" + certificatesv1 "k8s.io/api/certificates/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +// SignerClass is a well-known identifier for a certificate signer known to the HostedControlPlane +type SignerClass string + +const ( + // CustomerBreakGlassSigner is the signer class used to mint break-glass credentials for customers. + CustomerBreakGlassSigner SignerClass = "customer-break-glass" + // SREBreakGlassSigner is the signer class used to mint break-glass credentials for SRE. + SREBreakGlassSigner SignerClass = "sre-break-glass" +) + +func ValidSignerClass(input string) bool { + switch SignerClass(input) { + case CustomerBreakGlassSigner, SREBreakGlassSigner: + return true + default: + return false + } +} + +// ValidUsagesFor declares the valid usages for a CertificateSigningRequest, given a signer. +func ValidUsagesFor(signer SignerClass) (required, optional sets.Set[certificatesv1.KeyUsage]) { + switch signer { + case CustomerBreakGlassSigner, SREBreakGlassSigner: + return sets.New[certificatesv1.KeyUsage](certificatesv1.UsageClientAuth), + sets.New[certificatesv1.KeyUsage](certificatesv1.UsageDigitalSignature, certificatesv1.UsageKeyEncipherment) + default: + return sets.Set[certificatesv1.KeyUsage]{}, sets.Set[certificatesv1.KeyUsage]{} + } +} + +// SignerNameForHCP derives a signer name that's unique to this signer class for this specific HostedControlPlane. 
+func SignerNameForHCP(hcp *hypershiftv1beta1.HostedControlPlane, signer SignerClass) string { + return fmt.Sprintf("%s/%s.%s", SignerDomain, hcp.Namespace, signer) +} + +// SignerNameForHC derives a signer name that's unique to this signer class for this specific HostedControlPlane. +func SignerNameForHC(hc *hypershiftv1beta1.HostedCluster, signer SignerClass) string { + return fmt.Sprintf("%s/%s.%s", SignerDomain, manifests.HostedControlPlaneNamespace(hc.Namespace, hc.Name), signer) +} + +// SignerDomain is the domain all certificate signers identify under for HyperShift +const SignerDomain string = "hypershift.openshift.io" + +func CommonNamePrefix(signer SignerClass) string { + return fmt.Sprintf("system:%s:", signer) +} + +// ValidatorFunc knows how to validate a CertificateSigningRequest +type ValidatorFunc func(csr *certificatesv1.CertificateSigningRequest, x509cr *x509.CertificateRequest) error + +// Validator returns a function that validates CertificateSigningRequests +func Validator(hcp *hypershiftv1beta1.HostedControlPlane, signer SignerClass) ValidatorFunc { + signerName := SignerNameForHCP(hcp, signer) + requiredUsages, optionalUsages := ValidUsagesFor(signer) + validUsages := optionalUsages.Union(requiredUsages) + return func(csr *certificatesv1.CertificateSigningRequest, x509cr *x509.CertificateRequest) error { + if csr == nil { + return errors.New("the Kubernetes CertificateSigningRequest object is missing - programmer error") + } + if x509cr == nil { + return errors.New("the x509 CertificateRequest object is missing - programmer error") + } + + if csr.Spec.SignerName != signerName { + return fmt.Errorf("signer name %q does not match %q", csr.Spec.SignerName, signerName) + } + + if prefix := CommonNamePrefix(signer); !strings.HasPrefix(x509cr.Subject.CommonName, prefix) { + return fmt.Errorf("invalid certificate request: subject CommonName must begin with %q", prefix) + } + + requestedUsages := sets.New[certificatesv1.KeyUsage](csr.Spec.Usages...) 
+ if !requestedUsages.IsSuperset(requiredUsages) { + return fmt.Errorf("missing required usages: %v", requiredUsages.Difference(requestedUsages)) + } + if !validUsages.IsSuperset(requestedUsages) { + return fmt.Errorf("invalid usages: %v", requestedUsages.Difference(validUsages)) + } + + if len(x509cr.DNSNames) > 0 { + return errors.New("invalid certificate request: DNS subjectAltNames are not allowed") + } + if len(x509cr.EmailAddresses) > 0 { + return errors.New("invalid certificate request: Email subjectAltNames are not allowed") + } + if len(x509cr.IPAddresses) > 0 { + return errors.New("invalid certificate request: IP subjectAltNames are not allowed") + } + if len(x509cr.URIs) > 0 { + return errors.New("invalid certificate request: URI subjectAltNames are not allowed") + } + + return nil + } +} diff --git a/control-plane-pki-operator/certificates/validation_test.go b/control-plane-pki-operator/certificates/validation_test.go new file mode 100644 index 0000000000..3cd7f6794d --- /dev/null +++ b/control-plane-pki-operator/certificates/validation_test.go @@ -0,0 +1,330 @@ +package certificates + +import ( + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "net" + "net/url" + "testing" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + certificatesv1 "k8s.io/api/certificates/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestValidator(t *testing.T) { + for signer, testCases := range map[SignerClass][]struct { + name string + csr *certificatesv1.CertificateSigningRequest + x509cr *x509.CertificateRequest + expectedErr bool + }{ + CustomerBreakGlassSigner: { + { + name: "invalid signer domain", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "invalid", + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + }, + expectedErr: true, + }, + { + name: "invalid signer class", + csr: 
&certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/other", + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + }, + expectedErr: true, + }, + { + name: "missing required usage", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageDigitalSignature}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + }, + expectedErr: true, + }, + { + name: "invalid usage", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageEmailProtection}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + }, + expectedErr: true, + }, + { + name: "invalid request: SAN: dns names specified", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + DNSNames: []string{"example.com"}, + }, + expectedErr: true, + }, + { + name: "invalid request: SAN: email addresses specified", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + 
}, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + EmailAddresses: []string{"someone@example.com"}, + }, + expectedErr: true, + }, + { + name: "invalid request: SAN: ip addresses specified", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + IPAddresses: []net.IP{[]byte(`127.0.0.1`)}, + }, + expectedErr: true, + }, + { + name: "invalid request: SAN: URIs specified", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + URIs: []*url.URL{{Scheme: "https"}}, + }, + expectedErr: true, + }, + { + name: "invalid request: common name without correct prefix", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "something"}, + }, + expectedErr: true, + }, + { + name: "valid: client auth", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: 
pkix.Name{CommonName: "system:customer-break-glass:user"}, + }, + }, + { + name: "valid: client auth with extras", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth, certificatesv1.UsageDigitalSignature, certificatesv1.UsageKeyEncipherment}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:customer-break-glass:user"}, + }, + }, + }, + SREBreakGlassSigner: { + { + name: "invalid signer domain", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "invalid", + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + }, + expectedErr: true, + }, + { + name: "invalid signer class", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/other", + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + }, + expectedErr: true, + }, + { + name: "missing required usage", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageDigitalSignature}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + }, + expectedErr: true, + }, + { + name: "invalid usage", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageEmailProtection}, + }, + }, + x509cr: 
&x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + }, + expectedErr: true, + }, + { + name: "invalid request: SAN: dns names specified", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + DNSNames: []string{"example.com"}, + }, + expectedErr: true, + }, + { + name: "invalid request: SAN: email addresses specified", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + EmailAddresses: []string{"someone@example.com"}, + }, + expectedErr: true, + }, + { + name: "invalid request: SAN: ip addresses specified", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + IPAddresses: []net.IP{[]byte(`127.0.0.1`)}, + }, + expectedErr: true, + }, + { + name: "invalid request: SAN: URIs specified", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: 
"system:sre-break-glass:user"}, + URIs: []*url.URL{{Scheme: "https"}}, + }, + expectedErr: true, + }, + { + name: "invalid request: common name without correct prefix", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "something"}, + }, + expectedErr: true, + }, + { + name: "valid: client auth", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + }, + }, + { + name: "valid: client auth with extras", + csr: &certificatesv1.CertificateSigningRequest{ + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass", + Usages: []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth, certificatesv1.UsageDigitalSignature, certificatesv1.UsageKeyEncipherment}, + }, + }, + x509cr: &x509.CertificateRequest{ + Subject: pkix.Name{CommonName: "system:sre-break-glass:user"}, + }, + }, + }, + } { + for _, testCase := range testCases { + t.Run(fmt.Sprintf("%s.%s", signer, testCase.name), func(t *testing.T) { + validationErr := Validator(&hypershiftv1beta1.HostedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "hc-namespace-hc-name", + Name: "hcp-name", + }, + }, signer)(testCase.csr, testCase.x509cr) + if testCase.expectedErr && validationErr == nil { + t.Errorf("expected an error but got none") + } else if !testCase.expectedErr && validationErr != nil { + t.Errorf("expected no error but got: %v", validationErr) + } + }) 
+ } + } +} diff --git a/control-plane-pki-operator/certificatesigningcontroller/certificateloadingcontroller.go b/control-plane-pki-operator/certificatesigningcontroller/certificateloadingcontroller.go new file mode 100644 index 0000000000..20986abd3c --- /dev/null +++ b/control-plane-pki-operator/certificatesigningcontroller/certificateloadingcontroller.go @@ -0,0 +1,88 @@ +package certificatesigningcontroller + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/openshift/library-go/pkg/controller/factory" + librarygocrypto "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +type CertificateLoadingController struct { + caValue atomic.Value + loaded chan interface{} + setLoaded *sync.Once + + getSigningCertKeyPairSecret func() (*corev1.Secret, error) +} + +func NewCertificateLoadingController( + rotatedSigningCASecretNamespace, rotatedSigningCASecretName string, + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, + eventRecorder events.Recorder, +) (func(ctx context.Context) (*librarygocrypto.CA, error), factory.Controller) { + c := &CertificateLoadingController{ + loaded: make(chan interface{}), + setLoaded: &sync.Once{}, + getSigningCertKeyPairSecret: func() (*corev1.Secret, error) { + return kubeInformersForNamespaces.InformersFor(rotatedSigningCASecretNamespace).Core().V1().Secrets().Lister().Secrets(rotatedSigningCASecretNamespace).Get(rotatedSigningCASecretName) + }, + } + + return c.CurrentCA, factory.New(). + WithInformers(kubeInformersForNamespaces.InformersFor(rotatedSigningCASecretNamespace).Core().V1().Secrets().Informer()). + WithSync(c.sync). + ResyncEvery(time.Minute). 
+ ToController("CertificateLoadingController", eventRecorder.WithComponentSuffix("certificate-loading-controller")) +} + +func (c *CertificateLoadingController) sync(ctx context.Context, syncContext factory.SyncContext) error { + signingCertKeyPairSecret, err := c.getSigningCertKeyPairSecret() + if apierrors.IsNotFound(err) { + return nil // we need to wait for the secret to exist + } + if err != nil { + return err + } + if updated, err := c.SetCA(signingCertKeyPairSecret.Data["tls.crt"], signingCertKeyPairSecret.Data["tls.key"]); err != nil { + syncContext.Recorder().Warningf("CertificateLoadingFailed", "failed to load certificate: %v", err) + return nil // retrying this won't help + } else if updated { + syncContext.Recorder().Event("CertificateLoadingSucceeded", "loaded certificate") + } + + return nil +} + +// SetCA unconditionally stores the current cert/key content +func (c *CertificateLoadingController) SetCA(certPEM, keyPEM []byte) (bool, error) { + ca, err := librarygocrypto.GetCAFromBytes(certPEM, keyPEM) + if err != nil { + return false, fmt.Errorf("error parsing CA cert and key: %w", err) + } + c.caValue.Store(ca) + c.setLoaded.Do(func() { + close(c.loaded) + }) + + return true, nil +} + +// CurrentCA provides the current value of the CA. This is a blocking call as the value being loaded may +// not exist at the time it's being requested. 
+func (c *CertificateLoadingController) CurrentCA(ctx context.Context) (*librarygocrypto.CA, error) { + select { + case <-ctx.Done(): + return nil, fmt.Errorf("failed to wait for current CA: %w", ctx.Err()) + case <-c.loaded: + break + } + return c.caValue.Load().(*librarygocrypto.CA), nil +} diff --git a/control-plane-pki-operator/certificatesigningcontroller/certificateloadingcontroller_test.go b/control-plane-pki-operator/certificatesigningcontroller/certificateloadingcontroller_test.go new file mode 100644 index 0000000000..0997cf7655 --- /dev/null +++ b/control-plane-pki-operator/certificatesigningcontroller/certificateloadingcontroller_test.go @@ -0,0 +1,116 @@ +package certificatesigningcontroller + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/openshift/library-go/pkg/controller/factory" + librarygocrypto "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +func TestCertificateLoadingController_CurrentCA(t *testing.T) { + key, crt := certificateAuthorityRaw(t) + syncCtx := factory.NewSyncContext("whatever", events.NewLoggingEventRecorder("test")) + + controller := CertificateLoadingController{ + caValue: atomic.Value{}, + loaded: make(chan interface{}), + setLoaded: &sync.Once{}, + } + + t.Log("ask for the current CA before we've loaded anything") + caChan := make(chan *librarygocrypto.CA, 1) + errChan := make(chan error, 1) + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + ca, err := controller.CurrentCA(context.Background()) + caChan <- ca + errChan <- err + wg.Done() + }() + + t.Log("configure the controller not to find the CA (it does not yet exist)") + controller.getSigningCertKeyPairSecret = func() (*corev1.Secret, error) { + return nil, apierrors.NewNotFound(corev1.SchemeGroupVersion.WithResource("secrets").GroupResource(), "whatever") + } + + 
t.Log("expect that a sync does not error") + if err := controller.sync(context.Background(), syncCtx); err != nil { + t.Fatalf("expected no error from sync, got %v", err) + } + + t.Log("configure the controller to get the CA") + controller.getSigningCertKeyPairSecret = func() (*corev1.Secret, error) { + return &corev1.Secret{ + Data: map[string][]byte{ + "tls.crt": crt, + "tls.key": key, + }, + }, nil + } + + t.Log("expect that a sync does not error") + if err := controller.sync(context.Background(), syncCtx); err != nil { + t.Fatalf("expected no error from sync, got %v", err) + } + + t.Log("expect that our CurrentCA() call completed and loaded the correct thing") + wg.Wait() + close(caChan) + close(errChan) + var errs []error + for err := range errChan { + if err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + t.Fatalf("expected no error from CurrentCA(), got %v", errs) + } + + var cas []*librarygocrypto.CA + for ca := range caChan { + if ca != nil { + cas = append(cas, ca) + } + } + if len(cas) > 1 { + t.Fatalf("got more than one CA: %v", cas) + } + rawCert, rawKey, err := cas[0].Config.GetPEMBytes() + if err != nil { + t.Fatalf("unexpected error marshalling pem: %v", err) + } + if diff := cmp.Diff(rawCert, crt); diff != "" { + t.Fatalf("got incorrect cert: %v", diff) + } + if diff := cmp.Diff(rawKey, key); diff != "" { + t.Fatalf("got incorrect key: %v", diff) + } + + t.Log("expect that subsequent calls to CurrentCA() return quickly and load the correct thing") + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + ca, err := controller.CurrentCA(ctx) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + rawCert, rawKey, err = ca.Config.GetPEMBytes() + if err != nil { + t.Fatalf("unexpected error marshalling pem: %v", err) + } + if diff := cmp.Diff(rawCert, crt); diff != "" { + t.Fatalf("got incorrect cert: %v", diff) + } + if diff := cmp.Diff(rawKey, key); diff != "" { + t.Fatalf("got 
incorrect key: %v", diff) + } +} diff --git a/control-plane-pki-operator/certificatesigningcontroller/certificatesigningcontroller.go b/control-plane-pki-operator/certificatesigningcontroller/certificatesigningcontroller.go new file mode 100644 index 0000000000..1762029275 --- /dev/null +++ b/control-plane-pki-operator/certificatesigningcontroller/certificatesigningcontroller.go @@ -0,0 +1,254 @@ +package certificatesigningcontroller + +import ( + "context" + "crypto/x509" + "fmt" + "time" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" + "github.com/openshift/library-go/pkg/controller/factory" + librarygocrypto "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + certificatesv1applyconfigurations "k8s.io/client-go/applyconfigurations/certificates/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/certificate/csr" + "k8s.io/klog/v2" +) + +type CertificateSigningController struct { + kubeClient kubernetes.Interface + + fieldManager string + signerName string + validator certificates.ValidatorFunc + getCSR func(name string) (*certificatesv1.CertificateSigningRequest, error) + getCurrentCABundleContent func(context.Context) (*librarygocrypto.CA, error) + certTTL time.Duration +} + +func NewCertificateSigningController( + hostedControlPlane *hypershiftv1beta1.HostedControlPlane, + signer certificates.SignerClass, + getCurrentCABundleContent func(context.Context) (*librarygocrypto.CA, error), + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, + kubeClient kubernetes.Interface, + eventRecorder 
events.Recorder, + certTTL time.Duration, +) factory.Controller { + c := &CertificateSigningController{ + fieldManager: string(signer) + "-certificate-signing-controller", + kubeClient: kubeClient, + signerName: certificates.SignerNameForHCP(hostedControlPlane, signer), + validator: certificates.Validator(hostedControlPlane, signer), + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + return kubeInformersForNamespaces.InformersFor(corev1.NamespaceAll).Certificates().V1().CertificateSigningRequests().Lister().Get(name) + }, + getCurrentCABundleContent: getCurrentCABundleContent, + certTTL: certTTL, + } + + csrInformer := kubeInformersForNamespaces.InformersFor(corev1.NamespaceAll).Certificates().V1().CertificateSigningRequests().Informer() + + return factory.New(). + WithInformersQueueKeysFunc(enqueueCertificateSigningRequest, csrInformer). + WithSync(c.syncCertificateSigningRequest). + ResyncEvery(time.Minute). + ToController("CertificateSigningController", eventRecorder.WithComponentSuffix(c.fieldManager)) +} + +func enqueueCertificateSigningRequest(obj runtime.Object) []string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + return []string{key} +} + +func (c *CertificateSigningController) syncCertificateSigningRequest(ctx context.Context, syncContext factory.SyncContext) error { + _, name, err := cache.SplitMetaNamespaceKey(syncContext.QueueKey()) + if err != nil { + return err + } + + cfg, requeue, validationErr, err := c.processCertificateSigningRequest(ctx, name, nil) + if err != nil { + return err + } + if requeue { + return factory.SyntheticRequeueError + } + if cfg != nil { + if validationErr != nil { + syncContext.Recorder().Eventf("CertificateSigningRequestInvalid", "%q is invalid: %s", name, validationErr.Error()) + } else { + syncContext.Recorder().Eventf("CertificateSigningRequestValid", "%q is valid", name) + } + _, 
err := c.kubeClient.CertificatesV1().CertificateSigningRequests().ApplyStatus(ctx, cfg, metav1.ApplyOptions{FieldManager: c.fieldManager}) + if err == nil && validationErr == nil { + syncContext.Recorder().Eventf("CertificateSigningRequestFulfilled", "%q is fulfilled", name) + } + return err + } + + return nil +} + +const backdate = 5 * time.Minute + +func (c *CertificateSigningController) processCertificateSigningRequest(ctx context.Context, name string, now func() time.Time) (*certificatesv1applyconfigurations.CertificateSigningRequestApplyConfiguration, bool, error, error) { + csr, err := c.getCSR(name) + if apierrors.IsNotFound(err) { + return nil, false, nil, nil // nothing to be done, CSR is gone + } + if err != nil { + return nil, false, nil, err + } + + // Ignore the CSR in the following conditions: + if !certificates.IsCertificateRequestApproved(csr) || // it's not yet approved + certificates.HasTrueCondition(csr, certificatesv1.CertificateFailed) || // it's already failed + csr.Spec.SignerName != c.signerName || // it doesn't match our signer + csr.Status.Certificate != nil { // it's already signed + return nil, false, nil, nil + } + + x509cr, err := certificates.ParseCSR(csr.Spec.Request) + if err != nil { + return nil, false, nil, fmt.Errorf("unable to parse csr %q: %v", csr.Name, err) + } + if validationErr := c.validator(csr, x509cr); validationErr != nil { + cfg := certificatesv1applyconfigurations.CertificateSigningRequest(name) + cfg.Status = certificatesv1applyconfigurations.CertificateSigningRequestStatus().WithConditions( + certificatesv1applyconfigurations.CertificateSigningRequestCondition(). + WithType(certificatesv1.CertificateFailed). + WithStatus(corev1.ConditionTrue). + WithReason("SignerValidationFailure"). + WithMessage(validationErr.Error()). 
+ WithLastUpdateTime(metav1.Now()), + ) + return cfg, false, validationErr, nil + } + + ca, err := c.getCurrentCABundleContent(ctx) + if err != nil { + return nil, false, nil, err + } + + raw, err := sign(ca, x509cr, csr.Spec.Usages, c.certTTL, csr.Spec.ExpirationSeconds, now) + if err != nil { + return nil, false, nil, err + } + + cfg := certificatesv1applyconfigurations.CertificateSigningRequest(name) + cfg.Status = certificatesv1applyconfigurations.CertificateSigningRequestStatus().WithCertificate(raw...) + return cfg, false, nil, nil +} + +func sign(ca *librarygocrypto.CA, x509cr *x509.CertificateRequest, usages []certificatesv1.KeyUsage, certTTL time.Duration, expirationSeconds *int32, now func() time.Time) ([]byte, error) { + if err := x509cr.CheckSignature(); err != nil { + return nil, fmt.Errorf("unable to verify certificate request signature: %v", err) + } + + notBefore, notAfter, err := boundaries( + now, + duration(certTTL, expirationSeconds), + backdate, // this must always be less than the minimum TTL requested by a user (see sanity check requestedDuration below) + 100*backdate, // roughly 8 hours + ca.Config.Certs[0].NotAfter, + ) + if err != nil { + return nil, err + } + + x509usages, extUsages, err := certificates.KeyUsagesFromStrings(usages) + if err != nil { + return nil, err + } + + cert, err := ca.SignCertificate(&x509.Certificate{ + NotBefore: notBefore, + NotAfter: notAfter, + Subject: x509cr.Subject, + DNSNames: x509cr.DNSNames, + IPAddresses: x509cr.IPAddresses, + EmailAddresses: x509cr.EmailAddresses, + URIs: x509cr.URIs, + PublicKeyAlgorithm: x509cr.PublicKeyAlgorithm, + PublicKey: x509cr.PublicKey, + KeyUsage: x509usages, + ExtKeyUsage: extUsages, + BasicConstraintsValid: true, + IsCA: false, + }, x509cr.PublicKey) + if err != nil { + return nil, err + } + + return librarygocrypto.EncodeCertificates(cert) +} + +func duration(certTTL time.Duration, expirationSeconds *int32) time.Duration { + if expirationSeconds == nil { + return certTTL + 
} + + // honor the requested duration if it is less than the default TTL + // use 10 min (2x hard coded backdate above) as a sanity check lower bound + const min = 2 * backdate + switch requestedDuration := csr.ExpirationSecondsToDuration(*expirationSeconds); { + case requestedDuration > certTTL: + return certTTL + + case requestedDuration < min: + return min + + default: + return requestedDuration + } +} + +// boundaries computes NotBefore and NotAfter: +// +// All certificates set NotBefore = Now() - Backdate. +// Long-lived certificates set NotAfter = Now() + TTL - Backdate. +// Short-lived certificates set NotAfter = Now() + TTL. +// All certificates truncate NotAfter to the expiration date of the signer. +func boundaries(now func() time.Time, ttl, backdate, horizon time.Duration, signerNotAfter time.Time) (time.Time, time.Time, error) { + if now == nil { + now = time.Now + } + + instant := now() + + var notBefore, notAfter time.Time + if ttl < horizon { + // do not backdate the end time if we consider this to be a short-lived certificate + notAfter = instant.Add(ttl) + } else { + notAfter = instant.Add(ttl - backdate) + } + notBefore = instant.Add(-backdate) + + if !notAfter.Before(signerNotAfter) { + notAfter = signerNotAfter + } + + if !notBefore.Before(signerNotAfter) { + return notBefore, notAfter, fmt.Errorf("the signer has expired: NotAfter=%v", signerNotAfter) + } + + if !instant.Before(signerNotAfter) { + return notBefore, notAfter, fmt.Errorf("refusing to sign a certificate that expired in the past: NotAfter=%v", signerNotAfter) + } + + return notBefore, notAfter, nil +} diff --git a/control-plane-pki-operator/certificatesigningcontroller/certificatesigningcontroller_test.go b/control-plane-pki-operator/certificatesigningcontroller/certificatesigningcontroller_test.go new file mode 100644 index 0000000000..09d1de411e --- /dev/null +++ b/control-plane-pki-operator/certificatesigningcontroller/certificatesigningcontroller_test.go @@ -0,0 +1,560 @@ +package certificatesigningcontroller + 
import (
	"context"
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/x509"
	"crypto/x509/pkix"
	"embed"
	"encoding/pem"
	"errors"
	"math/rand"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	certificatesv1applyconfigurations "k8s.io/client-go/applyconfigurations/certificates/v1"
	metav1applyconfigurations "k8s.io/client-go/applyconfigurations/meta/v1"
	"k8s.io/utils/ptr"

	librarygocrypto "github.com/openshift/library-go/pkg/crypto"

	certificatesv1 "k8s.io/api/certificates/v1"
	"k8s.io/apimachinery/pkg/util/diff"
	"k8s.io/client-go/util/certificate/csr"
	testingclock "k8s.io/utils/clock/testing"

	"github.com/openshift/hypershift/control-plane-pki-operator/certificates"
)

// generating lots of PKI in environments where compute and/or entropy is limited (like in test containers)
// can be very slow - instead, we use precomputed PKI and allow for re-generating it if necessary
//
//go:embed testdata
var testdata embed.FS

// privateKey returns the test client private key: freshly generated (and written
// back to testdata/client.key) when $REGENERATE_PKI is set, otherwise loaded
// from the embedded testdata filesystem.
func privateKey(t *testing.T) crypto.PrivateKey {
	if os.Getenv("REGENERATE_PKI") != "" {
		t.Log("$REGENERATE_PKI set, generating a new private key")
		pk, err := ecdsa.GenerateKey(elliptic.P256(), insecureRand)
		if err != nil {
			t.Fatalf("failed to generate private key: %v", err)
		}

		der, err := x509.MarshalECPrivateKey(pk)
		if err != nil {
			t.Fatalf("failed to marshal private key: %v", err)
		}
		pkb := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})

		if err := os.WriteFile(filepath.Join("testdata", "client.key"), pkb, 0666); err != nil {
			t.Fatalf("failed to write re-generated private key: %v", err)
		}

		return pk
	}

	t.Log("loading private key from disk, use $REGENERATE_PKI to generate a new one")
	pemb, err := testdata.ReadFile(filepath.Join("testdata", "client.key"))
	if err != nil {
		t.Fatalf("failed to read private key: %v", err)
	}
	der, _ := pem.Decode(pemb)
	key, err := x509.ParseECPrivateKey(der.Bytes)
	if err != nil {
		t.Fatalf("failed to parse private key: %v", err)
	}
	return key
}

// certificateAuthority parses the raw test CA material into a library-go CA.
func certificateAuthority(t *testing.T) *librarygocrypto.CA {
	keyPem, certPem := certificateAuthorityRaw(t)

	ca, err := librarygocrypto.GetCAFromBytes(certPem, keyPem)
	if err != nil {
		t.Fatalf("error parsing CA cert and key: %v", err)
	}
	return ca
}

// certificateAuthorityRaw returns (key PEM, cert PEM) for the test signer:
// freshly generated (and written back to testdata/tls.{key,crt}) when
// $REGENERATE_PKI is set, otherwise loaded from the embedded testdata filesystem.
func certificateAuthorityRaw(t *testing.T) ([]byte, []byte) {
	if os.Getenv("REGENERATE_PKI") != "" {
		t.Log("$REGENERATE_PKI set, generating a new cert/key pair")
		// 100-year validity so the fixture never expires under test
		cfg, err := librarygocrypto.MakeSelfSignedCAConfigForDuration("test-signer", time.Hour*24*365*100)
		if err != nil {
			t.Fatalf("could not generate self-signed CA: %v", err)
		}

		certb, keyb, err := cfg.GetPEMBytes()
		if err != nil {
			t.Fatalf("failed to marshal CA cert and key: %v", err)
		}

		if err := os.WriteFile(filepath.Join("testdata", "tls.key"), keyb, 0666); err != nil {
			t.Fatalf("failed to write re-generated private key: %v", err)
		}

		if err := os.WriteFile(filepath.Join("testdata", "tls.crt"), certb, 0666); err != nil {
			t.Fatalf("failed to write re-generated certificate: %v", err)
		}

		return keyb, certb
	}

	t.Log("loading certificate/key pair from disk, use $REGENERATE_PKI to generate new ones")
	keyPem, err := testdata.ReadFile(filepath.Join("testdata", "tls.key"))
	if err != nil {
		t.Fatalf("failed to read private key: %v", err)
	}
	certPem, err := testdata.ReadFile(filepath.Join("testdata", "tls.crt"))
	if err != nil {
		t.Fatalf("failed to read certificate: %v", err)
	}
	return keyPem, certPem
}

// TestCertificateSigningController_processCertificateSigningRequest exercises the
// controller's per-CSR processing: missing/unapproved/failed/fulfilled CSRs are
// no-ops, malformed or invalid requests surface errors or a Failed condition,
// and valid approved CSRs get a certificate issued.
func TestCertificateSigningController_processCertificateSigningRequest(t *testing.T) {
	hcp := &hypershiftv1beta1.HostedControlPlane{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "hc-namespace-hc-name",
			Name:      "hcp-name",
		},
	}
	theTime, err := time.Parse(time.RFC3339Nano, "2006-01-02T15:04:05.999999999Z")
	if err != nil {
		t.Fatalf("could not parse time: %v", err)
	}
	fakeClock := testingclock.NewFakeClock(theTime)
	testCSRSpec := makeTestCSRSpec(t)
	cases := []struct {
		description string
		name        string
		signerName  string
		validator   certificates.ValidatorFunc
		getCSR      func(name string) (*certificatesv1.CertificateSigningRequest, error)

		expectedCfg           *certificatesv1applyconfigurations.CertificateSigningRequestApplyConfiguration
		expectedValidationErr bool
		expectedErr           bool
	}{
		{
			description: "csr missing",
			name:        "test-csr",
			getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) {
				return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name)
			},
			expectedErr: false, // nothing to do, no need to error & requeue
		},
		{
			description: "csr not approved",
			name:        "test-csr",
			getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) {
				if name != "test-csr" {
					return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name)
				}
				return &certificatesv1.CertificateSigningRequest{
					ObjectMeta: metav1.ObjectMeta{
						Name: name,
					},
				}, nil
			},
		},
		{
			description: "csr failed",
			name:        "test-csr",
			getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) {
				if name != "test-csr" {
					return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name)
				}
				return &certificatesv1.CertificateSigningRequest{
					ObjectMeta: metav1.ObjectMeta{
						Name: name,
					},
					Status: certificatesv1.CertificateSigningRequestStatus{
						Conditions: []certificatesv1.CertificateSigningRequestCondition{{
							Type:   certificatesv1.CertificateFailed,
							Status: corev1.ConditionTrue,
						}},
					},
				}, nil
			},
		},
		{
			description: "csr fulfilled",
			name:        "test-csr",
			getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) {
				if name != "test-csr" {
					return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name)
				}
				return &certificatesv1.CertificateSigningRequest{
					ObjectMeta: metav1.ObjectMeta{
						Name: name,
					},
					Status: certificatesv1.CertificateSigningRequestStatus{
						Conditions: []certificatesv1.CertificateSigningRequestCondition{{
							Type:   certificatesv1.CertificateApproved,
							Status: corev1.ConditionTrue,
						}},
						Certificate: []byte(`already done!`),
					},
				}, nil
			},
		},
		{
			description: "invalid request encoding",
			name:        "test-csr",
			signerName:  certificates.SignerNameForHCP(hcp, certificates.CustomerBreakGlassSigner),
			getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) {
				if name != "test-csr" {
					return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name)
				}
				return &certificatesv1.CertificateSigningRequest{
					ObjectMeta: metav1.ObjectMeta{
						Name: name,
					},
					Spec: certificatesv1.CertificateSigningRequestSpec{
						SignerName: certificates.SignerNameForHCP(hcp, certificates.CustomerBreakGlassSigner),
						Request:    []byte(`gobbly-gook`), // not PEM, cannot be parsed
					},
					Status: certificatesv1.CertificateSigningRequestStatus{
						Conditions: []certificatesv1.CertificateSigningRequestCondition{{
							Type:   certificatesv1.CertificateApproved,
							Status: corev1.ConditionTrue,
						}},
					},
				}, nil
			},
			expectedErr: true,
		},
		{
			description: "invalid csr",
			name:        "test-csr",
			getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) {
				if name != "test-csr" {
					return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name)
				}
				return &certificatesv1.CertificateSigningRequest{
					ObjectMeta: metav1.ObjectMeta{
						Name: name,
					},
					Spec: testCSRSpec(csrBuilder{
						cn:         certificates.CommonNamePrefix(certificates.CustomerBreakGlassSigner) + "test-client",
						org:        []string{"anything"},
						signerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass",
						usages:     []certificatesv1.KeyUsage{certificatesv1.UsageContentCommitment},
					}),
					Status: certificatesv1.CertificateSigningRequestStatus{
						Conditions: []certificatesv1.CertificateSigningRequestCondition{{
							Type:   certificatesv1.CertificateApproved,
							Status: corev1.ConditionTrue,
						}},
					},
				}, nil
			},
			signerName: certificates.SignerNameForHCP(hcp, certificates.CustomerBreakGlassSigner),
			// validator rejects everything, so processing must record a Failed condition
			validator: func(csr *certificatesv1.CertificateSigningRequest, x509cr *x509.CertificateRequest) error {
				return errors.New("invalid")
			},
			expectedCfg: &certificatesv1applyconfigurations.CertificateSigningRequestApplyConfiguration{
				Status: &certificatesv1applyconfigurations.CertificateSigningRequestStatusApplyConfiguration{
					Conditions: []certificatesv1applyconfigurations.CertificateSigningRequestConditionApplyConfiguration{
						{
							Type:    ptr.To(certificatesv1.CertificateFailed),
							Status:  ptr.To(corev1.ConditionTrue),
							Reason:  ptr.To("SignerValidationFailure"),
							Message: ptr.To("invalid"),
						},
					},
				},
			},
			expectedValidationErr: true,
		},
		{
			description: "valid csr",
			name:        "test-csr",
			getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) {
				if name != "test-csr" {
					return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name)
				}
				return &certificatesv1.CertificateSigningRequest{
					ObjectMeta: metav1.ObjectMeta{
						Name: name,
					},
					Spec: testCSRSpec(csrBuilder{
						cn:         certificates.CommonNamePrefix(certificates.CustomerBreakGlassSigner) + "test-client",
						org:        []string{"system:masters"},
						signerName: "hypershift.openshift.io/hc-namespace-hc-name.customer-break-glass",
						usages:     []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth},
					}),
					Status: certificatesv1.CertificateSigningRequestStatus{
						Conditions: []certificatesv1.CertificateSigningRequestCondition{{
							Type:   certificatesv1.CertificateApproved,
							Status: corev1.ConditionTrue,
						}},
					},
				}, nil
			},
			signerName: certificates.SignerNameForHCP(hcp, certificates.CustomerBreakGlassSigner),
			validator:  certificates.Validator(hcp, certificates.CustomerBreakGlassSigner),
			expectedCfg: &certificatesv1applyconfigurations.CertificateSigningRequestApplyConfiguration{
				Status: &certificatesv1applyconfigurations.CertificateSigningRequestStatusApplyConfiguration{
					// placeholder: real bytes are substituted below since signing is non-deterministic
					Certificate: []uint8(`testdata`),
				},
			},
		},
		{
			description: "valid sre csr",
			name:        "test-csr",
			getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) {
				if name != "test-csr" {
					return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name)
				}
				return &certificatesv1.CertificateSigningRequest{
					ObjectMeta: metav1.ObjectMeta{
						Name: name,
					},
					Spec: testCSRSpec(csrBuilder{
						cn:         certificates.CommonNamePrefix(certificates.SREBreakGlassSigner) + "test-client",
						org:        []string{"system:masters"},
						signerName: "hypershift.openshift.io/hc-namespace-hc-name.sre-break-glass",
						usages:     []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth},
					}),
					Status: certificatesv1.CertificateSigningRequestStatus{
						Conditions: []certificatesv1.CertificateSigningRequestCondition{{
							Type:   certificatesv1.CertificateApproved,
							Status: corev1.ConditionTrue,
						}},
					},
				}, nil
			},
			signerName: certificates.SignerNameForHCP(hcp, certificates.SREBreakGlassSigner),
			validator:  certificates.Validator(hcp, certificates.SREBreakGlassSigner),
			expectedCfg: &certificatesv1applyconfigurations.CertificateSigningRequestApplyConfiguration{
				Status: &certificatesv1applyconfigurations.CertificateSigningRequestStatusApplyConfiguration{
					Certificate: []uint8(`testdata`),
				},
			},
		},
	}

	for _, testCase := range cases {
		t.Run(testCase.description, func(t *testing.T) {
			ca := certificateAuthority(t)

			c := CertificateSigningController{
				validator:  testCase.validator,
				signerName: testCase.signerName,
				getCSR:     testCase.getCSR,
				getCurrentCABundleContent: func(ctx context.Context) (*librarygocrypto.CA, error) {
					return ca, nil
				},
				certTTL: 12 * time.Hour,
			}

			cfg, _, validationErr, err := c.processCertificateSigningRequest(context.Background(), testCase.name, fakeClock.Now)
			if testCase.expectedErr && err == nil {
				t.Errorf("expected an error but got none")
			} else if !testCase.expectedErr && err != nil {
				t.Errorf("expected no error but got: %v", err)
			}
			if testCase.expectedValidationErr && validationErr == nil {
				t.Errorf("expected a validation error but got none")
			} else if !testCase.expectedValidationErr && validationErr != nil {
				t.Errorf("expected no validation error but got: %v", validationErr)
			}

			// signing the certificate necessarily uses cryptographic randomness, so we can't know
			// what the output will be a priori
			if testCase.expectedCfg != nil && testCase.expectedCfg.Status != nil && testCase.expectedCfg.Status.Certificate != nil &&
				cfg != nil && cfg.Status != nil && cfg.Status.Certificate != nil {
				testCase.expectedCfg.Status.Certificate = cfg.Status.Certificate
			}

			if d := cmp.Diff(testCase.expectedCfg, cfg,
				cmpopts.IgnoreTypes(
					metav1applyconfigurations.TypeMetaApplyConfiguration{},
					&metav1applyconfigurations.ObjectMetaApplyConfiguration{},
				),
				cmpopts.IgnoreFields(
					certificatesv1applyconfigurations.CertificateSigningRequestConditionApplyConfiguration{},
					"LastUpdateTime", "LastTransitionTime",
				),
			); d != "" {
				t.Errorf("got invalid CSR cfg: %v", d)
			}
		})
	}
}

// noncryptographic for faster testing
// DO NOT COPY THIS CODE
var insecureRand = rand.New(rand.NewSource(0))

// csrBuilder collects the knobs for constructing a test CSR spec.
type csrBuilder struct {
	cn         string
	dnsNames   []string
	org        []string
	signerName string
	usages     []certificatesv1.KeyUsage
}

// makeTestCSRSpec returns a factory that turns a csrBuilder into a
// CertificateSigningRequestSpec whose Request is a PEM-encoded x509 CSR
// signed with the test private key.
func makeTestCSRSpec(t *testing.T) func(b csrBuilder) certificatesv1.CertificateSigningRequestSpec {
	return func(b csrBuilder) certificatesv1.CertificateSigningRequestSpec {
		pk := privateKey(t)
		csrb, err := x509.CreateCertificateRequest(insecureRand, &x509.CertificateRequest{
			Subject: pkix.Name{
				CommonName:   b.cn,
				Organization: b.org,
			},
			DNSNames: b.dnsNames,
		}, pk)
		if err != nil {
			t.Fatalf("failed to generate certificate request: %v", err)
		}
		spec := certificatesv1.CertificateSigningRequestSpec{
			Request: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrb}),
			Usages:  b.usages,
		}
		// only set SignerName when the builder asked for one
		if b.signerName != "" {
			spec.SignerName = b.signerName
		}
		return spec
	}
}

// TestSign signs a CSR with the test CA and checks the issued certificate's
// subject, usages, and validity window against expectations.
func TestSign(t *testing.T) {
	fakeClock := testingclock.FakeClock{}
	ca := certificateAuthority(t)
	pk := privateKey(t)
	csrb, err := x509.CreateCertificateRequest(insecureRand, &x509.CertificateRequest{
		Subject: pkix.Name{
			CommonName:   "test-cn",
			Organization: []string{"test-org"},
		},
		DNSNames: []string{"example.com"},
	}, pk)
	if err != nil {
		t.Fatalf("failed to generate certificate request: %v", err)
	}

	x509cr, err := certificates.ParseCSR(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrb}))
	if err != nil {
		t.Fatalf("failed to parse CSR: %v", err)
	}

	certData, err := sign(ca, x509cr, []certificatesv1.KeyUsage{
		certificatesv1.UsageSigning,
		certificatesv1.UsageKeyEncipherment,
		certificatesv1.UsageServerAuth,
		certificatesv1.UsageClientAuth,
	},
		1*time.Hour,
		// requesting a duration that is greater than TTL is ignored
		csr.DurationToExpirationSeconds(3*time.Hour),
		fakeClock.Now,
	)
	if err != nil {
		t.Fatalf("failed to sign CSR: %v", err)
	}
	if len(certData) == 0 {
		t.Fatalf("expected a certificate after signing")
	}

	certs, err := librarygocrypto.CertsFromPEM(certData)
	if err != nil {
		t.Fatalf("failed to parse certificate: %v", err)
	}
	if len(certs) != 1 {
		t.Fatalf("expected one certificate")
	}

	want := x509.Certificate{
		Version: 3,
		Subject: pkix.Name{
			CommonName:   "test-cn",
			Organization: []string{"test-org"},
		},
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
		BasicConstraintsValid: true,
		NotBefore:             fakeClock.Now(),
		NotAfter:              fakeClock.Now().Add(1 * time.Hour),
		PublicKeyAlgorithm:    x509.ECDSA,
		SignatureAlgorithm:    x509.SHA256WithRSA,
		MaxPathLen:            -1,
	}

	if d := cmp.Diff(*certs[0], want, diff.IgnoreUnset()); d != "" {
		t.Errorf("unexpected diff: %v", d)
	}
}

// TestDuration checks the TTL clamping rules: requests longer than the TTL
// are clamped down to it, requests shorter than 10 minutes (or negative) are
// clamped up, and nil means "use the default TTL".
func TestDuration(t *testing.T) {
	t.Parallel()

	tests := []struct {
		name              string
		certTTL           time.Duration
		expirationSeconds *int32
		want              time.Duration
	}{
		{
			name:              "can request shorter duration than TTL",
			certTTL:           time.Hour,
			expirationSeconds: csr.DurationToExpirationSeconds(30 * time.Minute),
			want:              30 * time.Minute,
		},
		{
			name:              "cannot request longer duration than TTL",
			certTTL:           time.Hour,
			expirationSeconds: csr.DurationToExpirationSeconds(3 * time.Hour),
			want:              time.Hour,
		},
		{
			name:              "cannot request negative duration",
			certTTL:           time.Hour,
			expirationSeconds: csr.DurationToExpirationSeconds(-time.Minute),
			want:              10 * time.Minute,
		},
		{
			name:              "cannot request duration less than 10 mins",
			certTTL:           time.Hour,
			expirationSeconds: csr.DurationToExpirationSeconds(10*time.Minute - time.Second),
			want:              10 * time.Minute,
		},
		{
			name:              "can request duration of exactly 10 mins",
			certTTL:           time.Hour,
			expirationSeconds: csr.DurationToExpirationSeconds(10 * time.Minute),
			want:              10 * time.Minute,
		},
		{
			name:              "can request duration equal to the default",
			certTTL:           time.Hour,
			expirationSeconds: csr.DurationToExpirationSeconds(time.Hour),
			want:              time.Hour,
		},
		{
			name:              "can choose not to request a duration to get the default",
			certTTL:           time.Hour,
			expirationSeconds: nil,
			want:              time.Hour,
		},
	}
	for _, testCase := range tests {
		t.Run(testCase.name, func(t *testing.T) {
			if got := duration(testCase.certTTL, testCase.expirationSeconds); got != testCase.want {
				t.Errorf("duration() = %v, want %v", got, testCase.want)
			}
		})
	}
}
diff --git a/control-plane-pki-operator/certificatesigningcontroller/testdata/client.key b/control-plane-pki-operator/certificatesigningcontroller/testdata/client.key
new file mode 100644
index 0000000000..69628b7457
--- /dev/null
+++ b/control-plane-pki-operator/certificatesigningcontroller/testdata/client.key
@@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEINa+zE6jrl6IUmqfSleLy57y1KZTFHaNbSmXYeqeT1qmoAoGCCqGSM49
AwEHoUQDQgAE62nJAfatjWiTMzuwvpUC8FFIgjQmU/KfiHciO7ZkYxKNaPVJgaIh
lJdtT3R7HCP3c1TitDUnnKq/Y5ZDQOlQtA==
-----END EC PRIVATE KEY-----
diff --git a/control-plane-pki-operator/certificatesigningcontroller/testdata/tls.crt b/control-plane-pki-operator/certificatesigningcontroller/testdata/tls.crt
new file mode 100644
index 0000000000..794d22e6d6
--- /dev/null
+++ b/control-plane-pki-operator/certificatesigningcontroller/testdata/tls.crt
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDEzCCAfugAwIBAgIIcbGyTnAKKYAwDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UE
AxMLdGVzdC1zaWduZXIwIBcNMjMxMjE0MjAxODU0WhgPMjEyMzExMjAyMDE4NTVa
MBYxFDASBgNVBAMTC3Rlc3Qtc2lnbmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEAsvQEzZL0YJ78J6ec1EioI4eBCQ9Ja2t7tACJXupmKnq91Hafz5Sy
I44QM9qOhCfcZyEW70hEqeC5VgyX69qv3bBszqMNe615xDdScmqjgUwNJ5NJvRLp
/boXiGDKt3Hwvopk9DBqDq0O/y/y4Eru7O+ZuMlZ3ETab6YEt31ZV4wVVaNslQyt
M3nTQkcas4L5PD6xA/1wMc+iLeJxxQbM2aQf1KKFkkV2iBiz8Z+0sUUafrswbNmk
+FoIFesfmIPso7CbrirYZpFxDMLpdO2GHxPeVEWfmMofh5u8Lr+lEUgHOfNmQLQO
zpH+YebiS8Rkl/YOkO+zEubgjswZJN3M8wIDAQABo2MwYTAOBgNVHQ8BAf8EBAMC
+AqQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUk0lo0kdrZwjr/AySJVJNgB21 +cgUwHwYDVR0jBBgwFoAUk0lo0kdrZwjr/AySJVJNgB21cgUwDQYJKoZIhvcNAQEL +BQADggEBAF/Su+38xLHVU8zx7Ov8Nwl1GVzsCSTIo89Xc9X193AnOEP0loIMSTMV +5G27qdRpCMCGeXNLhmkHqBAtVFjC/SGZF6E2owD+RCHQG+6WXHGOcHfprVObWOq8 +bzDlgPOs1MB7s+yHql9kzXP7OuJ4wtgGClQJ9w+EB6pA4wIf7xV7gecPHLmGP5eK +mVtuoxYH53+ybaWBi/Dw6iR+W/aHjPh0eMWZk5mML8/iG9v03TJXA6FERlCgxj9s +R4VhjRDYmMqkNMbhlpiH6j7cEez4MGZifcC13BKKBc4Jbcvrp004yUmHl0mvPTH6 +L1b9ksUlKDOdMEHJrzMisqcwEeROlyM= +-----END CERTIFICATE----- diff --git a/control-plane-pki-operator/certificatesigningcontroller/testdata/tls.key b/control-plane-pki-operator/certificatesigningcontroller/testdata/tls.key new file mode 100644 index 0000000000..93e2852a08 --- /dev/null +++ b/control-plane-pki-operator/certificatesigningcontroller/testdata/tls.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAsvQEzZL0YJ78J6ec1EioI4eBCQ9Ja2t7tACJXupmKnq91Haf +z5SyI44QM9qOhCfcZyEW70hEqeC5VgyX69qv3bBszqMNe615xDdScmqjgUwNJ5NJ +vRLp/boXiGDKt3Hwvopk9DBqDq0O/y/y4Eru7O+ZuMlZ3ETab6YEt31ZV4wVVaNs +lQytM3nTQkcas4L5PD6xA/1wMc+iLeJxxQbM2aQf1KKFkkV2iBiz8Z+0sUUafrsw +bNmk+FoIFesfmIPso7CbrirYZpFxDMLpdO2GHxPeVEWfmMofh5u8Lr+lEUgHOfNm +QLQOzpH+YebiS8Rkl/YOkO+zEubgjswZJN3M8wIDAQABAoIBAQClFPhtT/iCPYet +aTECn/gDtpfxbJm1L5URK0GOPrt6ynndyoSIcMqYhBAsVZ0NCtyGgn7uxAMbl0RB +viJToAyGfJ8TTFU+13wx5zr2c6tbtnWYIYZvlkgnGQlmdKvs6H5Gt9KDdToRSdJA +1NG/2UBpcGqljZGI4jeDsWo+frLxT6tOSJRnFGf1wrKCGSO4fkJWQVpQ2yKg8kGn +JOZQTdmejkjYeOZ9gLrd9kLzCn5ovHTYt7wBxEByxgxg9fh2REWbA6DZPX88g0jz +tyLp6LcG6UX79G9acG0XSb9T4XcAvUJP5jaKr7Jdk1ZmuVpyJ50xpSVvCWNJ79Vs +h4xloP3JAoGBAMOw8u9+JE8soj40nLNwWaH6MUS6b1J2sOyEgKVgKdriWtNRfwxg +0nT61ucrKwNq9miEThvLGtJ7RN6lJQq0YmY9ftpHoGuOPDUdfH65HwFFJZ4X3Org +vVOpctmQE5uX5+L7U/9e/TZMOQ4C/hFnmX3hY2dPFRRIONyqVbFMVagdAoGBAOoa +hnJLDRNEdngS7AqzGSubhLmnRxtwlQEAvWmKbQV+TRDYnZO/7HS6qpXJLCWGeNiq +1nsGvC7q0/9xjSkrMsFf+izVGqOLkKZCxNUshRpaZbVb4bLj5OBUaAsMHqvsHl0s +95gqnf1QRKxSzNAl6kLrskaPM1ez90CKWIg77txPAoGAPl1oLrcOr0TUN+rgfbcy 
+eZKYnQSlcaxt2hKoRQwOirlUpL/2M2Wv7KP8VRPG04IFIW34zpa955Jtcl9DHNQ7 +/8VdZgcpst1ThsHs6R3qKad1w5prR1d0PvNjrL5j4VRaDFZ4gIwvOly0WijN+5H+ +ssVfvo7PcvVJWdnXEXf4XGkCgYA1Pe0f51PE8wgijOMkF9F8qnUIKDQy2Gr6/GkX +rMTYv/3U+/7ykG69qYqMYGFq82del5QKDOEVppCqgu/A0jNL6YEjWyAg2+f8+Ch9 +9w8ajD6ffZMaNVxjbK7w/EOphBzvwf9Zmy+tYekMbBRqroTVzXcRNxZNNv/frNcv +vLm5XwKBgQC3vi/tHoeRIrmYutEhVAk7YY720T3aCbcJM9bCfB5HkcRsnr4cUHGs +o7tmpjINO1kVLT+nZBrguiPE90UR7Cu+5djr7mO4hjzTDr0kvgYK91+i8whN1Lmn +K3rGPBZJLbcddRF+MbNqsqh/57UBDV7+eELdWX5axj5+luJo7G9GGQ== +-----END RSA PRIVATE KEY----- diff --git a/control-plane-pki-operator/certificatesigningrequestapprovalcontroller/certificatesigningrequestapprovalcontroller.go b/control-plane-pki-operator/certificatesigningrequestapprovalcontroller/certificatesigningrequestapprovalcontroller.go new file mode 100644 index 0000000000..fde7a5f45b --- /dev/null +++ b/control-plane-pki-operator/certificatesigningrequestapprovalcontroller/certificatesigningrequestapprovalcontroller.go @@ -0,0 +1,139 @@ +package certificatesigningrequestapprovalcontroller + +import ( + "context" + "time" + + certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + hypershiftinformers "github.com/openshift/hypershift/client/informers/externalversions" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +type CertificateSigningRequestApprovalController struct { + kubeClient kubernetes.Interface + + 
namespace, signerName string + getCSR func(name string) (*certificatesv1.CertificateSigningRequest, error) + getCSRA func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) +} + +func NewCertificateSigningRequestApprovalController( + hostedControlPlane *hypershiftv1beta1.HostedControlPlane, + signer certificates.SignerClass, + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, + hypershiftInformers hypershiftinformers.SharedInformerFactory, + kubeClient kubernetes.Interface, + eventRecorder events.Recorder, +) factory.Controller { + c := &CertificateSigningRequestApprovalController{ + kubeClient: kubeClient, + namespace: hostedControlPlane.Namespace, + signerName: certificates.SignerNameForHCP(hostedControlPlane, signer), + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + return kubeInformersForNamespaces.InformersFor(corev1.NamespaceAll).Certificates().V1().CertificateSigningRequests().Lister().Get(name) + }, + getCSRA: func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) { + return hypershiftInformers.Certificates().V1alpha1().CertificateSigningRequestApprovals().Lister().CertificateSigningRequestApprovals(namespace).Get(name) + }, + } + csrInformer := kubeInformersForNamespaces.InformersFor(corev1.NamespaceAll).Certificates().V1().CertificateSigningRequests().Informer() + csraInformer := hypershiftInformers.Certificates().V1alpha1().CertificateSigningRequestApprovals().Informer() + + return factory.New(). + WithInformersQueueKeysFunc(enqueueCertificateSigningRequest, csrInformer). + WithInformersQueueKeysFunc(enqueueCertificateSigningRequestApproval, csraInformer). + WithSync(c.syncCertificateSigningRequest). + ResyncEvery(time.Minute). 
+ ToController(string(signer)+"-CertificateSigningRequestApprovalController", eventRecorder.WithComponentSuffix(string(signer)+"-certificate-signing-request-approval-controller")) +} + +func enqueueCertificateSigningRequest(obj runtime.Object) []string { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + return []string{key} +} + +func enqueueCertificateSigningRequestApproval(obj runtime.Object) []string { + // by convention, an approval is tied to a CertificateSingingRequest by name only + // we're OK to just use the full queue key since the sync will throw away the namespace + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + klog.ErrorS(err, "could not determine queue key") + return nil + } + return []string{key} +} + +func (c *CertificateSigningRequestApprovalController) syncCertificateSigningRequest(ctx context.Context, syncContext factory.SyncContext) error { + _, name, err := cache.SplitMetaNamespaceKey(syncContext.QueueKey()) + if err != nil { + return err + } + + csr, requeue, err := c.processCertificateSigningRequest(name) + if err != nil { + return err + } + if requeue { + return factory.SyntheticRequeueError + } + if csr != nil { + syncContext.Recorder().Eventf("CertificateSigningRequestApproved", "%q in is approved", csr.Name) + _, err = c.kubeClient.CertificatesV1().CertificateSigningRequests().UpdateApproval(ctx, name, csr, metav1.UpdateOptions{}) + return err + } + + return nil +} + +func (c *CertificateSigningRequestApprovalController) processCertificateSigningRequest(name string) (*certificatesv1.CertificateSigningRequest, bool, error) { + csr, err := c.getCSR(name) + if apierrors.IsNotFound(err) { + return nil, false, nil // nothing to do + } + if err != nil { + return nil, false, err + } + + if csr.Spec.SignerName != c.signerName { + return nil, false, nil + } + + if approved, denied := 
certificates.GetCertApprovalCondition(&csr.Status); approved || denied { + return nil, false, nil + } + + _, approvalGetErr := c.getCSRA(c.namespace, name) + if approvalGetErr != nil && !apierrors.IsNotFound(approvalGetErr) { + return nil, false, approvalGetErr + } + if apierrors.IsNotFound(approvalGetErr) { + return nil, false, nil + } + + // a CertificateSigningRequestApproval resource exists and matches the CertificateSigningRequest, so we can approve it + csr = csr.DeepCopy() + csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, + Reason: "ApprovalPresent", + Message: "The requisite approval resource exists.", + LastUpdateTime: metav1.Now(), + }) + return csr, false, nil +} diff --git a/control-plane-pki-operator/certificatesigningrequestapprovalcontroller/certificatesigningrequestapprovalcontroller_test.go b/control-plane-pki-operator/certificatesigningrequestapprovalcontroller/certificatesigningrequestapprovalcontroller_test.go new file mode 100644 index 0000000000..87083ee9b6 --- /dev/null +++ b/control-plane-pki-operator/certificatesigningrequestapprovalcontroller/certificatesigningrequestapprovalcontroller_test.go @@ -0,0 +1,249 @@ +package certificatesigningrequestapprovalcontroller + +import ( + "errors" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +func TestCertificateSigningRequestApprovalController_processCertificateSigningRequest(t *testing.T) { + for _, test := range []struct { + description string + namespace string + name string + signerName 
string + getCSR func(name string) (*certificatesv1.CertificateSigningRequest, error) + getCSRA func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) + expectedCSR *certificatesv1.CertificateSigningRequest + expectedErr bool + }{ + { + description: "no CSR found, no error", + namespace: "test-ns", + name: "test-csr", + signerName: "test-signer", + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name) + }, + getCSRA: func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) { + return nil, apierrors.NewNotFound(hypershiftv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals").GroupResource(), name) + }, + expectedErr: false, // nothing to be done + }, + { + description: "wrong signer, no update", + namespace: "test-ns", + name: "test-csr", + signerName: "test-signer", + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + if name != "test-csr" { + return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name) + } + return &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "wrong-one", + }, + }, nil + }, + getCSRA: func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) { + if namespace != "test-ns" || name != "test-csr" { + return nil, apierrors.NewNotFound(hypershiftv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals").GroupResource(), name) + } + return &certificatesv1alpha1.CertificateSigningRequestApproval{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, nil + }, + }, + { + description: "already approved, no update", + namespace: 
"test-ns", + name: "test-csr", + signerName: "test-signer", + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + if name != "test-csr" { + return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name) + } + return &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "test-signer", + }, + Status: certificatesv1.CertificateSigningRequestStatus{ + Conditions: []certificatesv1.CertificateSigningRequestCondition{{ + Type: certificatesv1.CertificateApproved, + }}, + }, + }, nil + }, + getCSRA: func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) { + if namespace != "test-ns" || name != "test-csr" { + return nil, apierrors.NewNotFound(hypershiftv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals").GroupResource(), name) + } + return &certificatesv1alpha1.CertificateSigningRequestApproval{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, nil + }, + }, + { + description: "already denied, no update", + namespace: "test-ns", + name: "test-csr", + signerName: "test-signer", + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + if name != "test-csr" { + return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name) + } + return &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "test-signer", + }, + Status: certificatesv1.CertificateSigningRequestStatus{ + Conditions: []certificatesv1.CertificateSigningRequestCondition{{ + Type: certificatesv1.CertificateDenied, + }}, + }, + }, nil + }, + getCSRA: func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) 
{ + if namespace != "test-ns" || name != "test-csr" { + return nil, apierrors.NewNotFound(hypershiftv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals").GroupResource(), name) + } + return &certificatesv1alpha1.CertificateSigningRequestApproval{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, nil + }, + }, + { + description: "no CSRA, no update", + namespace: "test-ns", + name: "test-csr", + signerName: "test-signer", + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + if name != "test-csr" { + return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name) + } + return &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "test-signer", + }, + }, nil + }, + getCSRA: func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) { + return nil, apierrors.NewNotFound(hypershiftv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals").GroupResource(), name) + }, + }, + { + description: "error getting CSRA, no update", + namespace: "test-ns", + name: "test-csr", + signerName: "test-signer", + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + if name != "test-csr" { + return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name) + } + return &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "test-signer", + }, + }, nil + }, + getCSRA: func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) { + return nil, apierrors.NewForbidden(hypershiftv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals").GroupResource(), 
name, errors.New("oops")) + }, + expectedErr: true, + }, + { + description: "CSRA exists, update to approve", + namespace: "test-ns", + name: "test-csr", + signerName: "test-signer", + getCSR: func(name string) (*certificatesv1.CertificateSigningRequest, error) { + if name != "test-csr" { + return nil, apierrors.NewNotFound(certificatesv1.SchemeGroupVersion.WithResource("certificatesigningrequests").GroupResource(), name) + } + return &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "test-signer", + }, + }, nil + }, + getCSRA: func(namespace, name string) (*certificatesv1alpha1.CertificateSigningRequestApproval, error) { + if namespace != "test-ns" || name != "test-csr" { + return nil, apierrors.NewNotFound(hypershiftv1beta1.SchemeGroupVersion.WithResource("certificatesigningrequestapprovals").GroupResource(), name) + } + return &certificatesv1alpha1.CertificateSigningRequestApproval{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, nil + }, + expectedCSR: &certificatesv1.CertificateSigningRequest{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-csr", + }, + Spec: certificatesv1.CertificateSigningRequestSpec{ + SignerName: "test-signer", + }, + Status: certificatesv1.CertificateSigningRequestStatus{ + Conditions: []certificatesv1.CertificateSigningRequestCondition{{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, + Reason: "ApprovalPresent", + Message: "The requisite approval resource exists.", + }}, + }, + }, + }, + } { + t.Run(test.description, func(t *testing.T) { + c := CertificateSigningRequestApprovalController{ + namespace: test.namespace, + signerName: test.signerName, + getCSR: test.getCSR, + getCSRA: test.getCSRA, + } + out, _, err := c.processCertificateSigningRequest(test.name) + if test.expectedErr && err == nil { + t.Errorf("expected an error but got none") + } else if !test.expectedErr && err != nil { + 
t.Errorf("expected no error but got: %v", err) + } + if diff := cmp.Diff(test.expectedCSR, out, cmpopts.IgnoreFields(certificatesv1.CertificateSigningRequestCondition{}, "LastUpdateTime")); diff != "" { + t.Errorf("got invalid CSR out: %v", diff) + } + }) + } +} diff --git a/control-plane-pki-operator/certrotationcontroller/certrotationcontroller.go b/control-plane-pki-operator/certrotationcontroller/certrotationcontroller.go index 7dccbb0386..2851c63139 100644 --- a/control-plane-pki-operator/certrotationcontroller/certrotationcontroller.go +++ b/control-plane-pki-operator/certrotationcontroller/certrotationcontroller.go @@ -9,10 +9,10 @@ import ( hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" hypershiftv1beta1client "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" "github.com/openshift/hypershift/control-plane-pki-operator/clienthelpers" pkimanifests "github.com/openshift/hypershift/control-plane-pki-operator/manifests" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/authentication/user" "k8s.io/client-go/kubernetes" @@ -57,7 +57,7 @@ func NewCertRotationController( UID: hostedControlPlane.UID, } - // we need the user info we're creating certificates for to be discernable as coming from us, + // we need the user info we're creating certificates for to be discernible as coming from us, // but not something that can be predicted by anyone - so, use a human-readable prefix and // crypto/rand for the rest randomString := func(bytes int) (string, error) { @@ -73,7 +73,6 @@ func NewCertRotationController( if err != nil { return nil, err } - userName := "customer-break-glass-" + userNameSuffix uid, err := randomString(128) if err != nil { return nil, err @@ -92,6 +91,8 @@ func NewCertRotationController( Client: 
kubeClient.CoreV1(), EventRecorder: eventRecorder, Owner: ownerRef, + JiraComponent: "HOSTEDCP", + Description: "Root signer for customer break-glass credentials.", }, certrotation.CABundleConfigMap{ Namespace: hostedControlPlane.Namespace, @@ -101,6 +102,8 @@ func NewCertRotationController( Client: kubeClient.CoreV1(), EventRecorder: eventRecorder, Owner: ownerRef, + JiraComponent: "HOSTEDCP", + Description: "Trust bundle for customer break-glass credentials.", }, certrotation.RotatedSelfSignedCertKeySecret{ Namespace: hostedControlPlane.Namespace, @@ -108,19 +111,77 @@ func NewCertRotationController( Validity: 36 * rotationDay / 24, Refresh: 6 * rotationDay / 24, CertCreator: &certrotation.ClientRotation{ - UserInfo: &user.DefaultInfo{Name: userName, UID: uid, Groups: []string{"system:masters"}}, + UserInfo: &user.DefaultInfo{ + Name: certificates.CommonNamePrefix(certificates.CustomerBreakGlassSigner) + userNameSuffix, + UID: uid, + Groups: []string{"system:masters"}, + }, }, Informer: kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().Secrets(), Lister: kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().Secrets().Lister(), Client: kubeClient.CoreV1(), EventRecorder: eventRecorder, Owner: ownerRef, + JiraComponent: "HOSTEDCP", + Description: "Client certificate for customer break-glass credentials.", }, eventRecorder, clienthelpers.NewHostedControlPlaneStatusReporter(hostedControlPlane.Name, hostedControlPlane.Namespace, hypershiftClient), ) ret.certRotators = append(ret.certRotators, certRotator) + sreRotatorName := "SREAdminKubeconfigSigner" + sreCertRotator := certrotation.NewCertRotationController( + sreRotatorName, + certrotation.RotatedSigningCASecret{ + Namespace: hostedControlPlane.Namespace, + Name: pkimanifests.SRESystemAdminSigner(hostedControlPlane.Namespace).Name, + Validity: 7 * rotationDay, + Refresh: 2 * rotationDay, + Informer: 
kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().Secrets(), + Lister: kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().Secrets().Lister(), + Client: kubeClient.CoreV1(), + EventRecorder: eventRecorder, + Owner: ownerRef, + JiraComponent: "HOSTEDCP", + Description: "Root signer for SRE break-glass credentials.", + }, + certrotation.CABundleConfigMap{ + Namespace: hostedControlPlane.Namespace, + Name: pkimanifests.SRESystemAdminSignerCA(hostedControlPlane.Namespace).Name, + Informer: kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().ConfigMaps(), + Lister: kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().ConfigMaps().Lister(), + Client: kubeClient.CoreV1(), + EventRecorder: eventRecorder, + Owner: ownerRef, + JiraComponent: "HOSTEDCP", + Description: "Trust bundle for SRE break-glass credentials.", + }, + certrotation.RotatedSelfSignedCertKeySecret{ + Namespace: hostedControlPlane.Namespace, + Name: pkimanifests.SRESystemAdminClientCertSecret(hostedControlPlane.Namespace).Name, + Validity: 36 * rotationDay / 24, + Refresh: 6 * rotationDay / 24, + CertCreator: &certrotation.ClientRotation{ + UserInfo: &user.DefaultInfo{ + Name: certificates.CommonNamePrefix(certificates.SREBreakGlassSigner) + userNameSuffix, + UID: uid, + Groups: []string{"system:masters"}, + }, + }, + Informer: kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().Secrets(), + Lister: kubeInformersForNamespaces.InformersFor(hostedControlPlane.Namespace).Core().V1().Secrets().Lister(), + Client: kubeClient.CoreV1(), + EventRecorder: eventRecorder, + Owner: ownerRef, + JiraComponent: "HOSTEDCP", + Description: "Client certificate for SRE break-glass credentials.", + }, + eventRecorder, + clienthelpers.NewHostedControlPlaneStatusReporter(hostedControlPlane.Name, hostedControlPlane.Namespace, hypershiftClient), + ) + ret.certRotators = 
append(ret.certRotators, sreCertRotator) + return ret, nil } @@ -134,20 +195,6 @@ func (c *CertRotationController) WaitForReady(stopCh <-chan struct{}) { } } -// RunOnce will run the cert rotation logic, but will not try to update the static pod status. -// This eliminates the need to pass an OperatorClient and avoids dubious writes and status. -func (c *CertRotationController) RunOnce() error { - errlist := []error{} - runOnceCtx := context.WithValue(context.Background(), certrotation.RunOnceContextKey, true) - for _, certRotator := range c.certRotators { - if err := certRotator.Sync(runOnceCtx, factory.NewSyncContext("CertRotationController", c.recorder)); err != nil { - errlist = append(errlist, err) - } - } - - return utilerrors.NewAggregate(errlist) -} - func (c *CertRotationController) Run(ctx context.Context, workers int) { klog.Infof("Starting CertRotation") defer klog.Infof("Shutting down CertRotation") diff --git a/control-plane-pki-operator/clienthelpers/conditions.go b/control-plane-pki-operator/clienthelpers/conditions.go index 3d3cb1503b..80d1bf3ddd 100644 --- a/control-plane-pki-operator/clienthelpers/conditions.go +++ b/control-plane-pki-operator/clienthelpers/conditions.go @@ -11,6 +11,7 @@ import ( "github.com/openshift/library-go/pkg/operator/certrotation" "github.com/openshift/library-go/pkg/operator/condition" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1applyconfigurations "k8s.io/client-go/applyconfigurations/meta/v1" ) func NewHostedControlPlaneStatusReporter(name, namespace string, client hypershiftv1beta1client.HostedControlPlanesGetter) *HostedControlPlaneStatusReporter { @@ -66,7 +67,13 @@ func UpdateHostedControlPlaneStatusCondition(ctx context.Context, newCondition m } cfg := hypershiftv1beta1applyconfigurations.HostedControlPlane(name, namespace). 
- WithStatus(hypershiftv1beta1applyconfigurations.HostedControlPlaneStatus().WithConditions(newCondition)) + WithStatus(hypershiftv1beta1applyconfigurations.HostedControlPlaneStatus().WithConditions(metav1applyconfigurations.Condition(). + WithType(newCondition.Type). + WithStatus(newCondition.Status). + WithLastTransitionTime(newCondition.LastTransitionTime). + WithReason(newCondition.Reason). + WithMessage(newCondition.Message), + )) _, updateErr := client.HostedControlPlanes(namespace).ApplyStatus(ctx, cfg, metav1.ApplyOptions{FieldManager: fieldManager}) return true, updateErr diff --git a/control-plane-pki-operator/manifests/pki.go b/control-plane-pki-operator/manifests/pki.go index 77ba14d66a..3568367373 100644 --- a/control-plane-pki-operator/manifests/pki.go +++ b/control-plane-pki-operator/manifests/pki.go @@ -31,6 +31,23 @@ func CustomerSystemAdminClientCertSecret(ns string) *corev1.Secret { return secretFor(ns, "customer-system-admin-client-cert-key") } +func SRESystemAdminSigner(ns string) *corev1.Secret { + return secretFor(ns, "sre-system-admin-signer") +} + +func SRESystemAdminSignerCA(ns string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sre-system-admin-signer-ca", + Namespace: ns, + }, + } +} + +func SRESystemAdminClientCertSecret(ns string) *corev1.Secret { + return secretFor(ns, "sre-system-admin-client-cert-key") +} + func TotalKASClientCABundle(ns string) *corev1.ConfigMap { return &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ diff --git a/control-plane-pki-operator/operator.go b/control-plane-pki-operator/operator.go index 21e80df995..4025444778 100644 --- a/control-plane-pki-operator/operator.go +++ b/control-plane-pki-operator/operator.go @@ -4,13 +4,21 @@ import ( "context" "fmt" "os" + "time" - hypershiftv1beta1 "github.com/openshift/hypershift/client/clientset/clientset/typed/hypershift/v1beta1" + hypershiftclient "github.com/openshift/hypershift/client/clientset/clientset" + 
hypershiftinformers "github.com/openshift/hypershift/client/informers/externalversions" + "github.com/openshift/hypershift/control-plane-pki-operator/certificaterevocationcontroller" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" + "github.com/openshift/hypershift/control-plane-pki-operator/certificatesigningcontroller" + "github.com/openshift/hypershift/control-plane-pki-operator/certificatesigningrequestapprovalcontroller" "github.com/openshift/hypershift/control-plane-pki-operator/certrotationcontroller" "github.com/openshift/hypershift/control-plane-pki-operator/config" + "github.com/openshift/hypershift/control-plane-pki-operator/manifests" "github.com/openshift/hypershift/control-plane-pki-operator/targetconfigcontroller" "github.com/openshift/library-go/pkg/controller/controllercmd" "github.com/openshift/library-go/pkg/operator/v1helpers" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) @@ -38,23 +46,25 @@ func RunOperator(ctx context.Context, controllerContext *controllercmd.Controlle if err != nil { return err } - hypershiftClient, err := hypershiftv1beta1.NewForConfig(controllerContext.KubeConfig) + hypershiftClient, err := hypershiftclient.NewForConfig(controllerContext.KubeConfig) if err != nil { return err } kubeInformersForNamespaces := v1helpers.NewKubeInformersForNamespaces( kubeClient, namespace, + corev1.NamespaceAll, ) + hypershiftInformerFactory := hypershiftinformers.NewSharedInformerFactoryWithOptions(hypershiftClient, 10*time.Minute, hypershiftinformers.WithNamespace(namespace)) - hcp, err := hypershiftClient.HostedControlPlanes(namespace).Get(ctx, name, metav1.GetOptions{}) + hcp, err := hypershiftClient.HypershiftV1beta1().HostedControlPlanes(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return err } targetConfigReconciler := targetconfigcontroller.NewTargetConfigController( hcp, - hypershiftClient, + 
hypershiftClient.HypershiftV1beta1(), kubeInformersForNamespaces, kubeClient, controllerContext.EventRecorder, @@ -63,19 +73,86 @@ func RunOperator(ctx context.Context, controllerContext *controllercmd.Controlle certRotationController, err := certrotationcontroller.NewCertRotationController( hcp, kubeClient, - hypershiftClient, + hypershiftClient.HypershiftV1beta1(), kubeInformersForNamespaces, - controllerContext.EventRecorder.WithComponentSuffix("cert-rotation-controller"), + controllerContext.EventRecorder, certRotationScale, ) if err != nil { return err } + customerCertSigningRequestApprovalController := certificatesigningrequestapprovalcontroller.NewCertificateSigningRequestApprovalController( + hcp, + certificates.CustomerBreakGlassSigner, + kubeInformersForNamespaces, + hypershiftInformerFactory, + kubeClient, + controllerContext.EventRecorder, + ) + sreCertSigningRequestApprovalController := certificatesigningrequestapprovalcontroller.NewCertificateSigningRequestApprovalController( + hcp, + certificates.SREBreakGlassSigner, + kubeInformersForNamespaces, + hypershiftInformerFactory, + kubeClient, + controllerContext.EventRecorder, + ) + + certRevocationController := certificaterevocationcontroller.NewCertificateRevocationController( + hcp, + kubeInformersForNamespaces, + hypershiftInformerFactory, + kubeClient, + hypershiftClient, + controllerContext.EventRecorder, + ) + + customerSystemAdminSigner := manifests.CustomerSystemAdminSigner(namespace) + currentCustomerCA, customerCertLoadingController := certificatesigningcontroller.NewCertificateLoadingController( + customerSystemAdminSigner.Namespace, customerSystemAdminSigner.Name, + kubeInformersForNamespaces, + controllerContext.EventRecorder, + ) + + customerBreakGlassCertSigningController := certificatesigningcontroller.NewCertificateSigningController( + hcp, + certificates.CustomerBreakGlassSigner, + currentCustomerCA, + kubeInformersForNamespaces, + kubeClient, + controllerContext.EventRecorder, + 
36*certRotationScale/24, + ) + + sreSystemAdminSigner := manifests.SRESystemAdminSigner(namespace) + currentSRECA, sreCertLoadingController := certificatesigningcontroller.NewCertificateLoadingController( + sreSystemAdminSigner.Namespace, sreSystemAdminSigner.Name, + kubeInformersForNamespaces, + controllerContext.EventRecorder, + ) + sreBreakGlassCertSigningController := certificatesigningcontroller.NewCertificateSigningController( + hcp, + certificates.SREBreakGlassSigner, + currentSRECA, + kubeInformersForNamespaces, + kubeClient, + controllerContext.EventRecorder, + 36*certRotationScale/24, + ) + kubeInformersForNamespaces.Start(ctx.Done()) + hypershiftInformerFactory.Start(ctx.Done()) go targetConfigReconciler.Run(ctx, 1) go certRotationController.Run(ctx, 1) + go customerCertSigningRequestApprovalController.Run(ctx, 1) + go sreCertSigningRequestApprovalController.Run(ctx, 1) + go customerCertLoadingController.Run(ctx, 1) + go sreCertLoadingController.Run(ctx, 1) + go customerBreakGlassCertSigningController.Run(ctx, 1) + go sreBreakGlassCertSigningController.Run(ctx, 1) + go certRevocationController.Run(ctx, 1) <-ctx.Done() return nil diff --git a/control-plane-pki-operator/targetconfigcontroller/targetconfigcontroller.go b/control-plane-pki-operator/targetconfigcontroller/targetconfigcontroller.go index 2bb99b796b..530a21f42a 100644 --- a/control-plane-pki-operator/targetconfigcontroller/targetconfigcontroller.go +++ b/control-plane-pki-operator/targetconfigcontroller/targetconfigcontroller.go @@ -105,6 +105,8 @@ func ManageClientCABundle(ctx context.Context, lister corev1listers.ConfigMapLis "HOSTEDCP", "Kubernetes total client CA bundle.", // this bundle is what this operator uses to mint new customer client certs it directly manages resourcesynccontroller.ResourceLocation{Namespace: owner.Namespace, Name: pkimanifests.CustomerSystemAdminSignerCA(owner.Namespace).Name}, + // this bundle is what this operator uses to mint new SRE client certs it directly 
manages + resourcesynccontroller.ResourceLocation{Namespace: owner.Namespace, Name: pkimanifests.SRESystemAdminSignerCA(owner.Namespace).Name}, ) if err != nil { return nil, false, err diff --git a/docs/Dockerfile b/docs/Dockerfile index f0ca732492..62237c5030 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -1,2 +1,2 @@ -FROM squidfunk/mkdocs-material:8.2.8 +FROM squidfunk/mkdocs-material:9.5.47 RUN pip install mkdocs-mermaid2-plugin mkdocs-glightbox diff --git a/docs/content/how-to/disconnected/known-issues.md b/docs/content/how-to/disconnected/known-issues.md index 959c843497..166274a939 100644 --- a/docs/content/how-to/disconnected/known-issues.md +++ b/docs/content/how-to/disconnected/known-issues.md @@ -9,4 +9,38 @@ When you work in a disconnected environment the OLM catalog sources will be stil The most practical one is the first choice. To proceed with this option, you will need to follow [these instructions](https://docs.openshift.com/container-platform/4.14/installing/disconnected_install/installing-mirroring-disconnected.html). The process will make sure all the images get mirrored and also the ICSP will be generated properly. -Additionally when you're provisioning the HostedCluster you will need to add a flag to indicate that the OLMCatalogPlacement is set to `Guest` because if that's not set, you will not be able to disable them. \ No newline at end of file +Additionally when you're provisioning the HostedCluster you will need to add a flag to indicate that the OLMCatalogPlacement is set to `Guest` because if that's not set, you will not be able to disable them. + +## Hypershift operator is failing to reconcile in Disconnected environments + +If you are operating in a disconnected environment and have deployed the Hypershift operator, you may encounter an issue with the UWM telemetry writer. Essentially, it exposes Openshift deployment data in your RedHat account, but this functionality does not operate in a disconnected environments. 
+
+**Symptoms:**
+
+- The Hypershift operator appears to be running correctly in the `hypershift` namespace but even if you create the Hosted Cluster nothing happens.
+- There will be a couple of log entries in the Hypershift operator:
+
+```
+{"level":"error","ts":"2023-12-20T15:23:01Z","msg":"Reconciler error","controller":"deployment","controllerGroup":"apps","controllerKind":"Deployment","Deployment":{"name":"operator","namespace":"hypershift"},"namespace":"hypershift","name":"operator","reconcileID":"451fde3c-eb1b-4cf0-98cb-ad0f8c6a6288","error":"cannot get telemeter client secret: Secret \"telemeter-client\" not found","stacktrace":"sigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).reconcileHandler\n\t/hypershift/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:329\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).processNextWorkItem\n\t/hypershift/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:266\nsigs.k8s.io/controller-runtime/pkg/internal/controller.(*Controller).Start.func2.2\n\t/hypershift/vendor/sigs.k8s.io/controller-runtime/pkg/internal/controller/controller.go:227"}
+
+{"level":"debug","ts":"2023-12-20T15:23:01Z","logger":"events","msg":"Failed to ensure UWM telemetry remote write: cannot get telemeter client secret: Secret \"telemeter-client\" not found","type":"Warning","object":{"kind":"Deployment","namespace":"hypershift","name":"operator","uid":"c6628a3c-a597-4e32-875a-f5704da2bdbb","apiVersion":"apps/v1","resourceVersion":"4091099"},"reason":"ReconcileError"}
+```
+
+**Solution:**
+
+To resolve this issue, the solution will depend on how you deployed Hypershift:
+
+- **The HO was deployed using ACM/MCE:** In this case you will need to create a ConfigMap in the `local-cluster` namespace (the namespace and ConfigMap name cannot be changed) called `hypershift-operator-install-flags` with this content:
+
+```
+---
+apiVersion: v1
+kind: ConfigMap
+metadata: + name: hypershift-operator-install-flags + namespace: local-cluster +data: + installFlagsToRemove: --enable-uwm-telemetry-remote-write +``` + +- **The HO was deployed using the Hypershift binary:** In this case you will just need to remove the flag `--enable-uwm-telemetry-remote-write` from the hypershift deployment command. diff --git a/docs/content/how-to/metrics-sets.md b/docs/content/how-to/metrics-sets.md index 0fd5b6a0f4..1002c7dc4f 100644 --- a/docs/content/how-to/metrics-sets.md +++ b/docs/content/how-to/metrics-sets.md @@ -3,8 +3,8 @@ HyperShift creates ServiceMonitor resources in each control plane namespace that allow a Prometheus stack to scrape metrics from the control planes. ServiceMonitors use metrics relabelings to define which metrics are included or excluded from a particular component (etcd, Kube API server, etc) -The number of metrics produced by control planes has a direct impact on resource requirements of -the monitoring stack scraping them. +The number of metrics produced by control planes has a direct impact on resource requirements of +the monitoring stack scraping them. Instead of producing a fixed number of metrics that apply to all situations, HyperShift allows configuration of a "metrics set" that identifies a set of metrics to produce per control plane. @@ -13,7 +13,7 @@ The following metrics sets are supported: * `Telemetry` - metrics needed for telemetry. This is the default and the smallest set of metrics. -* `SRE` - Configurable metrics set, intended to include necessary metrics to produce alerts and +* `SRE` - Configurable metrics set, intended to include necessary metrics to produce alerts and allow troubleshooting of control plane components. * `All` - all the metrics produced by standalone OCP control plane components. 
diff --git a/docs/content/reference/api.md b/docs/content/reference/api.md index a490a9405f..20bb7039cf 100644 --- a/docs/content/reference/api.md +++ b/docs/content/reference/api.md @@ -21,6 +21,79 @@ OpenShift clusters at scale.

worker nodes and their kubelets, and the infrastructure on which they run). This enables “hosted control plane as a service” use cases.

+##CertificateSigningRequestApproval { #hypershift.openshift.io/v1beta1.CertificateSigningRequestApproval } +

+

CertificateSigningRequestApproval defines the desired state of CertificateSigningRequestApproval

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+apiVersion
+string
+ +hypershift.openshift.io/v1beta1 + +
+kind
+string +
CertificateSigningRequestApproval
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +CertificateSigningRequestApprovalSpec + + +
+
+
+ +
+
+status
+ + +CertificateSigningRequestApprovalStatus + + +
+
##HostedCluster { #hypershift.openshift.io/v1beta1.HostedCluster }

HostedCluster is the primary representation of a HyperShift cluster and encapsulates @@ -2382,6 +2455,22 @@ string

+###CertificateSigningRequestApprovalSpec { #hypershift.openshift.io/v1beta1.CertificateSigningRequestApprovalSpec } +

+(Appears on: +CertificateSigningRequestApproval) +

+

+

CertificateSigningRequestApprovalSpec defines the desired state of CertificateSigningRequestApproval

+

+###CertificateSigningRequestApprovalStatus { #hypershift.openshift.io/v1beta1.CertificateSigningRequestApprovalStatus } +

+(Appears on: +CertificateSigningRequestApproval) +

+

+

CertificateSigningRequestApprovalStatus defines the observed state of CertificateSigningRequestApproval

+

###ClusterAutoscaling { #hypershift.openshift.io/v1beta1.ClusterAutoscaling }

(Appears on: @@ -2590,6 +2679,21 @@ This configuration is only honored when the top level Authentication config has +operatorhub
+ + +github.com/openshift/api/config/v1.OperatorHubSpec + + + + +(Optional) +

OperatorHub specifies the configuration for the Operator Lifecycle Manager in the HostedCluster. This is only configured at deployment time but the controller are not reconcilling over it. +The OperatorHub configuration will be constantly reconciled if catalog placement is management, but only on cluster creation otherwise.

+ + + + scheduler
@@ -2877,6 +2981,12 @@ for AWS workers has been created. A failure here indicates that NodePools without a security group will be blocked from creating machines.

+

"AWSDefaultSecurityGroupDeleted"

+

AWSDefaultSecurityGroupDeleted indicates whether the default security group +for AWS workers has been deleted. +A failure here indicates that the Security Group has some dependencies that +there are still pending cloud resources to be deleted that are using that SG.

+

"AWSEndpointAvailable"

AWSEndpointServiceAvailable indicates whether the AWS Endpoint has been created in the guest VPC

@@ -3029,6 +3139,11 @@ A failure here is unlikely to resolve without the changing user input.

supported by the underlying management cluster. A failure here is unlikely to resolve without the changing user input.

+

"ValidIDPConfiguration"

+

ValidIDPConfiguration indicates if the Identity Provider configuration is valid. +A failure here may require external user intervention to resolve +e.g. the user-provided IDP configuration provided is invalid or the IDP is not reachable.

+

"ValidKubeVirtInfraNetworkMTU"

ValidKubeVirtInfraNetworkMTU indicates if the MTU configured on an infra cluster hosting a guest cluster utilizing kubevirt platform is a sufficient value that will avoid @@ -5245,6 +5360,20 @@ bool this can only be set to false if AdditionalNetworks are configured

+ + +nodeSelector
+ +map[string]string + + + +(Optional) +

NodeSelector is a selector which must be true for the kubevirt VirtualMachine to fit on a node. +Selector which must match a node’s labels for the VM to be scheduled on that node. More info: +https://kubernetes.io/docs/concepts/configuration/assign-pod-node/

+ + ###KubevirtPersistentVolume { #hypershift.openshift.io/v1beta1.KubevirtPersistentVolume } diff --git a/docs/requirements.txt b/docs/requirements.txt index de13a0120a..f92592807f 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,4 +1,4 @@ -mkdocs==1.3.0 +mkdocs==1.6.1 mkdocs-material==8.2.8 mkdocs-mermaid2-plugin==0.5.2 mkdocs-glightbox==0.3.0 diff --git a/etcd-defrag/etcddefrag_controller.go b/etcd-defrag/etcddefrag_controller.go new file mode 100644 index 0000000000..6e5d4d553b --- /dev/null +++ b/etcd-defrag/etcddefrag_controller.go @@ -0,0 +1,275 @@ +package etcddefrag + +import ( + "context" + "errors" + "fmt" + "math" + "time" + + "github.com/go-logr/logr" + "go.etcd.io/etcd/api/v3/etcdserverpb" + clientv3 "go.etcd.io/etcd/client/v3" + + "k8s.io/apimachinery/pkg/util/wait" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/openshift/hypershift/pkg/etcdcli" + "github.com/openshift/hypershift/support/upsert" + "github.com/openshift/library-go/pkg/operator/events" +) + +const ( + pollWaitDuration = 2 * time.Second + pollTimeoutDuration = 60 * time.Second + maxDefragFailuresBeforeDegrade = 3 + minDefragBytes int64 = 100 * 1024 * 1024 // 100MB + minDefragWaitDuration = 36 * time.Second + maxFragmentedPercentage float64 = 45 + + controllerRequeueDuration = 10 * time.Minute +) + +type DefragController struct { + client.Client + log logr.Logger + + ControllerName string + upsert.CreateOrUpdateProvider + + etcdClient etcdcli.EtcdClient + numDefragFailures int + defragWaitDuration time.Duration +} + +type defragTicker struct { + defrag *DefragController +} + +func (r *DefragController) setupTicker(mgr manager.Manager) error { + ticker := defragTicker{ + defrag: r, + } + if err := mgr.Add(&ticker); err != nil { + return fmt.Errorf("failed to add defrag ticker runnable to manager: %w", err) + } + return nil +} + +func (m *defragTicker) Start(ctx 
context.Context) error { + ticker := time.NewTicker(controllerRequeueDuration) + + for { + select { + case <-ctx.Done(): + return nil + case <-ticker.C: + m.defrag.log.Info("Running defrag.") + if err := m.defrag.runDefrag(ctx); err != nil { + m.defrag.log.Error(err, "failed to run defragmentation cycle") + } + } + } +} + +func (r *DefragController) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + endpointsFunc := func() ([]string, error) { + return r.etcdEndpoints(ctx) + } + r.etcdClient = etcdcli.NewEtcdClient(endpointsFunc, events.NewLoggingEventRecorder(r.ControllerName)) + + // Set this so that it will immediately requeue itself. + r.defragWaitDuration = minDefragWaitDuration + + if err := r.setupTicker(mgr); err != nil { + return fmt.Errorf("failed to set up ticker: %w", err) + } + + return nil +} + +func (r *DefragController) etcdEndpoints(ctx context.Context) ([]string, error) { + var eplist []string + + // Because we are part of the etcd pod, we can just use localhost. + // The client itself will discover the other endpoints. + eplist = append(eplist, "https://localhost:2379") + return eplist, nil +} + +/* + +Everything from here down is from the cluster-etcd-controller code. +It's been modified mostly to replace 'c' with 'r' as the object name. +Also the logging has been changed. + +https://github.com/openshift/cluster-etcd-operator/blob/master/pkg/operator/defragcontroller/defragcontroller.go + +https://github.com/openshift/cluster-etcd-operator/tree/master/pkg/etcdcli + +*/ + +func (r *DefragController) runDefrag(ctx context.Context) error { + // Do not defrag if any of the cluster members are unhealthy. 
+ members, err := r.etcdClient.MemberList(ctx) + if err != nil { + return err + } + r.log.Info("Checking status for Defrag", "members", members) + for _, m := range members { + status, err := r.etcdClient.Status(ctx, m.ClientURLs[0]) + if err != nil { + r.log.Error(err, "Member returned error", "member", m) + } else { + fragmentedPercentage := checkFragmentationPercentage(status.DbSize, status.DbSizeInUse) + r.log.Info("Member", "name", m.Name, "URL", m.ClientURLs[0], "fragmentation percentage", fragmentedPercentage, "DBSize on disk", status.DbSize, "DBSize in use", status.DbSizeInUse, "leader", status.Leader) + } + } + + memberHealth, err := r.etcdClient.MemberHealth(ctx) + if err != nil { + return err + } + + if !etcdcli.IsClusterHealthy(memberHealth) { + r.log.Error(err, "Cluster is unhealthy", "status", memberHealth.Status()) + return fmt.Errorf("cluster is unhealthy, status: %s", memberHealth.Status()) + } + + // filter out learner members since they don't support the defragment API call + var etcdMembers []*etcdserverpb.Member + for _, m := range members { + if !m.IsLearner { + etcdMembers = append(etcdMembers, m) + } + } + + var endpointStatus []*clientv3.StatusResponse + var leader *clientv3.StatusResponse + for _, member := range etcdMembers { + if len(member.ClientURLs) == 0 { + // skip unstarted member + continue + } + status, err := r.etcdClient.Status(ctx, member.ClientURLs[0]) + if err != nil { + return err + } + if leader == nil && status.Leader == member.ID { + leader = status + continue + } + endpointStatus = append(endpointStatus, status) + } + + // Leader last if possible. 
+ if leader != nil { + r.log.Info("Appending leader last", "ID", leader.Header.MemberId) + endpointStatus = append(endpointStatus, leader) + } + + successfulDefrags := 0 + var errs []error + for _, status := range endpointStatus { + member, err := getMemberFromStatus(etcdMembers, status) + if err != nil { + errs = append(errs, err) + continue + } + + // Check each member's status which includes the db size on disk "DbSize" and the db size in use "DbSizeInUse" + // compare the % difference and if that difference is over the max diff threshold and also above the minimum + // db size we defrag the members state file. In the case where this command only partially completed controller + // can clean that up on the next sync. Having the db sizes slightly different is not a problem in itself. + if r.isEndpointBackendFragmented(member, status) { + fragmentedPercentage := checkFragmentationPercentage(status.DbSize, status.DbSizeInUse) + r.log.Info("Member is over defrag threshold", "name", member.Name, "URL", member.ClientURLs[0], "fragmentation percentage", fragmentedPercentage, "DBSize on disk", status.DbSize, "DBSize in use", status.DbSizeInUse, "leader", status.Leader) + if _, err := r.etcdClient.Defragment(ctx, member); err != nil { + // Defrag can timeout if defragmentation takes longer than etcdcli.DefragDialTimeout. + r.log.Error(err, "DefragController Defragment Failed", "member", member.Name, "ID", member.ID) + errMsg := fmt.Sprintf("failed defrag on member: %s, memberID: %x: %v", member.Name, member.ID, err) + errs = append(errs, fmt.Errorf(errMsg)) + continue + } + + r.log.Info("DefragController Defragment Success", "member", member.Name, "ID", member.ID) + successfulDefrags++ + + // Give cluster time to recover before we move to the next member. + if err := wait.Poll( + pollWaitDuration, + pollTimeoutDuration, + func() (bool, error) { + // Ensure defragmentation attempts have clear observable signal. 
+ r.log.Info("Sleeping to allow cluster to recover before defragging next member", "waiting", r.defragWaitDuration) + time.Sleep(r.defragWaitDuration) + + memberHealth, err := r.etcdClient.MemberHealth(ctx) + if err != nil { + r.log.Error(err, "Failed checking member health") + return false, nil + } + if !etcdcli.IsClusterHealthy(memberHealth) { + r.log.Info("Cluster member is unhealthy", "member status", memberHealth.Status()) + return false, nil + } + return true, nil + }); err != nil { + errs = append(errs, fmt.Errorf("timeout waiting for cluster to stabilize after defrag: %w", err)) + } + } else { + // no fragmentation needed is also a success + successfulDefrags++ + } + } + + if successfulDefrags != len(endpointStatus) { + r.numDefragFailures++ + r.log.Info("DefragController Defragment Partial Failure", "successfully defragged", successfulDefrags, "of members", len(endpointStatus), "tries remaining", maxDefragFailuresBeforeDegrade-r.numDefragFailures) + + // TODO: This should bubble up to HCP condition errors. + return errors.Join(errs...) + } + + if len(errs) > 0 { + r.log.Info("found errors even though all members have been successfully defragmented", "error", errors.Join(errs...)) + } + + return nil +} + +// isEndpointBackendFragmented checks the status of all cluster members to ensure that no members have a fragmented store. +// This can happen if the operator starts defrag of the cluster but then loses leader status and is rescheduled before +// the operator can defrag all members. 
+func (r *DefragController) isEndpointBackendFragmented(member *etcdserverpb.Member, endpointStatus *clientv3.StatusResponse) bool { + if endpointStatus == nil { + r.log.Error(nil, "endpoint status validation failed", "status", endpointStatus) + return false + } + fragmentedPercentage := checkFragmentationPercentage(endpointStatus.DbSize, endpointStatus.DbSizeInUse) + + r.log.Info("Etcd member backend store fragmentation status", "name", member.Name, "URL", member.ClientURLs[0], "fragmentation percentage", fragmentedPercentage, "DBSize on disk", endpointStatus.DbSize, "DBSize in use", endpointStatus.DbSizeInUse) + + return fragmentedPercentage >= maxFragmentedPercentage && endpointStatus.DbSize >= minDefragBytes +} + +func checkFragmentationPercentage(ondisk, inuse int64) float64 { + diff := float64(ondisk - inuse) + fragmentedPercentage := (diff / float64(ondisk)) * 100 + return math.Round(fragmentedPercentage*100) / 100 +} + +func getMemberFromStatus(members []*etcdserverpb.Member, endpointStatus *clientv3.StatusResponse) (*etcdserverpb.Member, error) { + if endpointStatus == nil { + return nil, fmt.Errorf("endpoint status validation failed: %v", endpointStatus) + } + for _, member := range members { + if member.ID == endpointStatus.Header.MemberId { + return member, nil + } + } + return nil, fmt.Errorf("no member found in MemberList matching ID: %v", endpointStatus.Header.MemberId) +} diff --git a/etcd-defrag/main.go b/etcd-defrag/main.go new file mode 100644 index 0000000000..f1e8f2b040 --- /dev/null +++ b/etcd-defrag/main.go @@ -0,0 +1,100 @@ +package etcddefrag + +import ( + "context" + "fmt" + "log" + "os" + "os/signal" + "syscall" + "time" + + "github.com/openshift/hypershift/pkg/version" + hyperapi "github.com/openshift/hypershift/support/api" + "github.com/openshift/hypershift/support/upsert" + "github.com/spf13/cobra" + "go.uber.org/zap/zapcore" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + 
"sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +type Options struct { + Namespace string +} + +func NewStartCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "etcd-defrag-controller", + Short: "Starts the etcd defrag controller", + } + + opts := Options{ + Namespace: "", + } + + cmd.Flags().StringVar(&opts.Namespace, "namespace", os.Getenv("MY_NAMESPACE"), "The namespace this operator lives in (required)") + + cmd.Run = func(cmd *cobra.Command, args []string) { + ctx, cancel := context.WithCancel(context.Background()) + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGINT) + go func() { + <-sigs + cancel() + }() + + if err := run(ctx, opts); err != nil { + log.Fatal(err) + os.Exit(1) + } + } + + return cmd +} + +func run(ctx context.Context, opts Options) error { + logger := zap.New(zap.UseDevMode(true), zap.JSONEncoder(func(o *zapcore.EncoderConfig) { + o.EncodeTime = zapcore.RFC3339TimeEncoder + })) + ctrl.SetLogger(logger) + logger.Info("Starting etcd-defrag-controller", "version", version.String(), "namespace", opts.Namespace) + leaseDuration := time.Minute * 5 + renewDeadline := time.Minute * 4 + retryPeriod := time.Second * 30 + + restConfig := ctrl.GetConfigOrDie() + restConfig.UserAgent = "etcd-defrag-controller" + mgr, err := ctrl.NewManager(restConfig, ctrl.Options{ + Scheme: hyperapi.Scheme, + LeaderElection: true, + LeaderElectionID: "etcd-defrag-controller-leader-elect", + LeaderElectionResourceLock: "leases", + LeaderElectionNamespace: opts.Namespace, + LeaderElectionReleaseOnCancel: true, + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, + RetryPeriod: &retryPeriod, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{opts.Namespace: {}}, + }, + }) + if err != nil { + return fmt.Errorf("unable to start manager: %w", err) + } + + controllerName := "EtcdDefragController" + if err := (&DefragController{ + Client: mgr.GetClient(), + log: logger, + ControllerName: controllerName, + 
CreateOrUpdateProvider: upsert.New(false), + }).SetupWithManager(ctx, mgr); err != nil { + return fmt.Errorf("unable to create controller: %s: %w", controllerName, err) + } + + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + return fmt.Errorf("problem running manager: %w", err) + } + return nil +} diff --git a/fast.Dockerfile b/fast.Dockerfile index 60264e279c..cc46077261 100644 --- a/fast.Dockerfile +++ b/fast.Dockerfile @@ -10,6 +10,7 @@ LABEL io.openshift.hypershift.control-plane-operator-manages.decompress-decode-c LABEL io.openshift.hypershift.control-plane-operator-creates-aws-sg=true LABEL io.openshift.hypershift.control-plane-operator-applies-management-kas-network-policy-label=true LABEL io.openshift.hypershift.restricted-psa=true +LABEL io.openshift.hypershift.control-plane-pki-operator-signs-csrs=true RUN cd /usr/bin && \ ln -s control-plane-operator ignition-server && \ diff --git a/go.mod b/go.mod index a482d58b89..e4462b5b5a 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/Azure/go-autorest/autorest v0.11.29 github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 github.com/IBM-Cloud/power-go-client v1.3.0 - github.com/IBM/go-sdk-core/v5 v5.14.1 + github.com/IBM/go-sdk-core/v5 v5.17.4 github.com/IBM/ibm-cos-sdk-go v1.9.4 github.com/IBM/networking-go-sdk v0.29.0 github.com/IBM/platform-services-go-sdk v0.48.0 @@ -20,24 +20,26 @@ require ( github.com/clarketm/json v1.14.1 github.com/coreos/ignition/v2 v2.14.0 github.com/docker/distribution v2.8.2+incompatible + github.com/elazarl/goproxy v0.0.0-20240618083138-03be62527ccb github.com/evanphx/json-patch/v5 v5.7.0 github.com/go-logr/logr v1.3.0 github.com/go-logr/zapr v1.2.4 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/google/go-cmp v0.6.0 github.com/google/gofuzz v1.2.0 - github.com/google/uuid v1.4.0 + github.com/google/uuid v1.6.0 + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/go-uuid v1.0.1 
github.com/kubernetes-csi/external-snapshotter/client/v6 v6.0.1 github.com/onsi/gomega v1.30.0 github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b - github.com/openshift/api v0.0.0-20231206170337-f356bd9e2ff6 + github.com/openshift/api v0.0.0-20240215031714-be926bb0d751 github.com/openshift/client-go v0.0.0-20230926161409-848405da69e1 - github.com/openshift/cloud-credential-operator v0.0.0-20220708202639-ef451d260cf6 + github.com/openshift/cloud-credential-operator v0.0.0-20240504012628-26d4761a9f4e github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f github.com/openshift/cluster-node-tuning-operator v0.0.0-20220614214129-2c76314fb3cc - github.com/openshift/library-go v0.0.0-20231128230659-785a9313da6c + github.com/openshift/library-go v0.0.0-20231214171439-128164517bf7 github.com/operator-framework/api v0.10.7 github.com/pkg/errors v0.9.1 github.com/ppc64le-cloud/powervs-utils v0.0.0-20230306072409-bc42a581099f @@ -46,15 +48,23 @@ require ( github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.45.0 github.com/spf13/cobra v1.8.0 + github.com/stretchr/testify v1.8.4 github.com/tombuildsstuff/giovanni v0.18.0 github.com/vincent-petithory/dataurl v1.0.0 + go.etcd.io/etcd/api/v3 v3.5.10 + go.etcd.io/etcd/client/pkg/v3 v3.5.10 + go.etcd.io/etcd/client/v3 v3.5.10 + go.etcd.io/etcd/server/v3 v3.5.10 + go.etcd.io/etcd/tests/v3 v3.5.10 go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.16.0 - golang.org/x/net v0.19.0 + golang.org/x/crypto v0.21.0 + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/net v0.23.0 golang.org/x/sync v0.5.0 golang.org/x/time v0.5.0 + google.golang.org/grpc v1.58.3 + gopkg.in/go-jose/go-jose.v2 v2.6.3 gopkg.in/ini.v1 v1.67.0 - gopkg.in/square/go-jose.v2 v2.6.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.28.4 k8s.io/apiextensions-apiserver v0.28.3 @@ -71,7 +81,6 @@ require ( k8s.io/utils 
v0.0.0-20231127182322-b307cd553661 kubevirt.io/api v1.1.0 kubevirt.io/containerized-data-importer-api v1.57.0 - sigs.k8s.io/apiserver-network-proxy v0.0.24 sigs.k8s.io/cluster-api v1.5.3 sigs.k8s.io/cluster-api-provider-aws/v2 v2.2.4 sigs.k8s.io/cluster-api-provider-azure v1.11.5 @@ -115,52 +124,56 @@ require ( github.com/dimchansky/utfbom v1.1.1 // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-errors/errors v1.5.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.20.3 // indirect + github.com/go-openapi/errors v0.21.0 // indirect github.com/go-openapi/jsonpointer v0.20.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/loads v0.21.1 // indirect github.com/go-openapi/runtime v0.23.0 // indirect github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/strfmt v0.21.7 // indirect + github.com/go-openapi/strfmt v0.22.1 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/go-openapi/validate v0.21.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.13.0 // indirect + github.com/go-playground/validator/v10 v10.19.0 // indirect github.com/gobuffalo/flect v1.0.2 // indirect github.com/gofrs/uuid v4.2.0+incompatible // indirect 
github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.2 // indirect github.com/google/cel-go v0.16.1 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/websocket v1.4.2 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.4 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jonboulle/clockwork v0.2.2 // indirect github.com/jongio/azidext/go/azidext v0.5.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/jpillora/backoff v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/leodido/go-urn v1.2.3 // indirect + github.com/leodido/go-urn v1.4.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect @@ -173,7 +186,6 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg 
v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect @@ -181,17 +193,22 @@ require ( github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/profile v1.3.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/robfig/cron v1.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect + github.com/soheilhy/cmux v0.1.5 // indirect github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace // indirect github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 // indirect + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.etcd.io/etcd/api/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect - go.etcd.io/etcd/client/v3 v3.5.9 // indirect - go.mongodb.org/mongo-driver v1.12.1 // indirect + go.etcd.io/bbolt v1.3.8 // indirect + go.etcd.io/etcd/client/v2 v2.305.10 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.10 // indirect + go.etcd.io/etcd/raft/v3 v3.5.10 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect go.opentelemetry.io/otel v1.19.0 // indirect @@ -203,18 +220,16 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.starlark.net v0.0.0-20231101134539-556fd59b42f6 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // 
indirect golang.org/x/oauth2 v0.14.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5 // indirect - google.golang.org/grpc v1.58.3 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/evanphx/json-patch.v5 v5.7.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect diff --git a/go.sum b/go.sum index e395a8c23a..66b88178b0 100644 --- a/go.sum +++ b/go.sum @@ -669,8 +669,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym github.com/IBM-Cloud/power-go-client v1.3.0 h1:XI4O8IqmoKbJuEbTuYYaf+eoxRP+LSiB5Wwn26qdjeI= github.com/IBM-Cloud/power-go-client v1.3.0/go.mod h1:0YVWoIQN5I5IvyhO/m4yxgPJqCh9QjceN2FNlVpYlOQ= github.com/IBM/go-sdk-core/v5 v5.8.0/go.mod h1:+YbdhrjCHC84ls4MeBp+Hj4NZCni+tDAc0XQUqRO9Jc= -github.com/IBM/go-sdk-core/v5 v5.14.1 h1:WR1r0zz+gDW++xzZjF41r9ueY4JyjS2vgZjiYs8lO3c= -github.com/IBM/go-sdk-core/v5 v5.14.1/go.mod h1:MUvIr/1mgGh198ZXL+ByKz9Qs1JoEh80v/96x8jPXNY= +github.com/IBM/go-sdk-core/v5 v5.17.4 h1:VGb9+mRrnS2HpHZFM5hy4J6ppIWnwNrw0G+tLSgcJLc= +github.com/IBM/go-sdk-core/v5 v5.17.4/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns= github.com/IBM/ibm-cos-sdk-go v1.9.4 h1:jGIcufCP0ys7QFJKEYSy/0EWn3sl5kOo2LKfa6x4gHk= github.com/IBM/ibm-cos-sdk-go v1.9.4/go.mod h1:1VnKWJhPE536IvitwDxZFH9ycmj/7VDOiw8Mjljb160= github.com/IBM/networking-go-sdk v0.29.0 h1:xpFhUfyHcyPf/urGk12Nx++NUQMddL7WWCaIVV1sC1k= @@ -781,6 +781,7 @@ 
github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0= @@ -834,8 +835,13 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20240618083138-03be62527ccb h1:2SoxRauy2IqekRMggrQk3yNI5X6omSnk6ugVbFywwXs= +github.com/elazarl/goproxy v0.0.0-20240618083138-03be62527ccb/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM= +github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod 
h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= @@ -872,6 +878,7 @@ github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSY github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -883,6 +890,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= @@ -945,8 +954,8 @@ github.com/go-openapi/errors v0.19.7/go.mod 
h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= -github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= +github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= +github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= @@ -1010,8 +1019,8 @@ github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/strfmt v0.22.1 h1:5Ky8cybT4576C6Ffc+8gYji/wRXCo6Ozm8RaWjPI6jc= +github.com/go-openapi/strfmt v0.22.1/go.mod h1:OfVoytIXJasDkkGvkb1Cceb3BPyMOwk1FgmyyEw7NYg= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= @@ -1045,8 +1054,8 @@ 
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/Nu github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.13.0 h1:cFRQdfaSMCOSfGCCLB20MHvuoHb/s5G8L5pu2ppK5AQ= -github.com/go-playground/validator/v10 v10.13.0/go.mod h1:dwu7+CG8/CtBiJFZDz4e+5Upb6OLw04gtBYw0mcG/z4= +github.com/go-playground/validator/v10 v10.19.0 h1:ol+5Fu+cSq9JD7SoSqe04GMI92cbn0+wvQ3bZ8b/AU4= +github.com/go-playground/validator/v10 v10.19.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= @@ -1136,8 +1145,9 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -1202,8 +1212,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= @@ -1259,15 +1269,15 @@ github.com/hashicorp/go-azure-helpers v0.12.0/go.mod h1:Zc3v4DNeX6PDdy7NljlYpnrd github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= 
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA= -github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -1361,8 +1371,8 @@ github.com/kubevirt/containerized-data-importer-api v1.41.1-0.20211201033752-055 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= -github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= @@ 
-1385,6 +1395,7 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -1392,6 +1403,7 @@ github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcME github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -1502,12 +1514,12 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= -github.com/openshift/api 
v0.0.0-20231206170337-f356bd9e2ff6 h1:XxmuE/mW0VMIxJ78M0zAgTY/Fi0grVJgSnSgr0qjICU= -github.com/openshift/api v0.0.0-20231206170337-f356bd9e2ff6/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU= +github.com/openshift/api v0.0.0-20240215031714-be926bb0d751 h1:03FScdYGtdERC3ygX6T3IPJzSodMTl8zC/kVxW1Lzsg= +github.com/openshift/api v0.0.0-20240215031714-be926bb0d751/go.mod h1:qNtV0315F+f8ld52TLtPvrfivZpdimOzTi3kn9IVbtU= github.com/openshift/client-go v0.0.0-20230926161409-848405da69e1 h1:W1N/3nVciqmjPjn2xldHjb0AwwCQzlGxLvX5BCgE8H4= github.com/openshift/client-go v0.0.0-20230926161409-848405da69e1/go.mod h1:ihUJrhBcYAGYQrJu/gP2OMgfVds5f5z5kbeLNBqjHLo= -github.com/openshift/cloud-credential-operator v0.0.0-20220708202639-ef451d260cf6 h1:+hva5Mtz22jmpu9v7RlE1iVqHUZu72nRjmpV+eLNIGQ= -github.com/openshift/cloud-credential-operator v0.0.0-20220708202639-ef451d260cf6/go.mod h1:qTEqKytotRAu1/N5Grbfd0DGjD56EYk4zNsHg+4Hvlw= +github.com/openshift/cloud-credential-operator v0.0.0-20240504012628-26d4761a9f4e h1:DjC4QItD712V6LDg9Jhd/Zu0Su4jKzrpoGN2wRm/6Lo= +github.com/openshift/cloud-credential-operator v0.0.0-20240504012628-26d4761a9f4e/go.mod h1:/DAxHTQz6bzHF82MyF6mq4GbkMWA2nb3+WRsVEMqiMc= github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f h1:YWIaRFHMNzxi26FbT0TE9zl4pBzfeiZjQJv0JUzibUc= github.com/openshift/cluster-api-provider-agent/api v0.0.0-20230918065757-81658c4ddf2f/go.mod h1:RNzrOK4TA6LdyrgrEvb1TXjOnhRC+iBmnBoAa/4ivBw= github.com/openshift/cluster-node-tuning-operator v0.0.0-20220614214129-2c76314fb3cc h1:fEmKNhv7Ph5ACdTa+qvQW0aKeg2glab/D94v6oPgQ1o= @@ -1515,8 +1527,8 @@ github.com/openshift/cluster-node-tuning-operator v0.0.0-20220614214129-2c76314f github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= github.com/openshift/custom-resource-status 
v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= -github.com/openshift/library-go v0.0.0-20231128230659-785a9313da6c h1:L/nRp+uq0MKKvK14y18Ua84dYF860b1dCE973wSQ1do= -github.com/openshift/library-go v0.0.0-20231128230659-785a9313da6c/go.mod h1:8UzmrBMCn7+GzouL8DVYkL9COBQTB1Ggd13/mHJQCUg= +github.com/openshift/library-go v0.0.0-20231214171439-128164517bf7 h1:n8IAM02eWgY32s+XmHSpIg0UEe3QwBbNaXAxF9O8a7s= +github.com/openshift/library-go v0.0.0-20231214171439-128164517bf7/go.mod h1:0q1UIvboZXfSlUaK+08wsXYw4N6OUo2b/z3a1EWNGyw= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -1595,6 +1607,7 @@ github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1672,12 +1685,14 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= 
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/tombuildsstuff/giovanni v0.18.0 h1:AiSnRtD+yUBS9STKjOsi1pmkBHnNRsLCsp9fnDBFH14= github.com/tombuildsstuff/giovanni v0.18.0/go.mod h1:66KVLYma2whJhEdxPSPL3GQHkulhK+C5CluKfHGfPF4= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -1688,9 +1703,7 @@ github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9 github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3/go.mod h1:CSBTxrhePCm0cmXNKDGeu+6bOQzpaEklfCqEpn89JWk= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep 
v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= @@ -1713,26 +1726,32 @@ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= -go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= -go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= -go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v2 v2.305.9 
h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= -go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= +go.etcd.io/etcd/tests/v3 v3.5.10 h1:F1pbXwKxwZ58aBT2+CSL/r8WUCAVhob0y1y8OVJ204s= +go.etcd.io/etcd/tests/v3 v3.5.10/go.mod h1:vVMWDv9OhopxfJCd+CMI4pih0zUDqlkJj6JcBNlUVXI= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod 
h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= @@ -1743,8 +1762,8 @@ go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4S go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= -go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1829,13 +1848,12 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.14.0/go.mod 
h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1958,6 +1976,7 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1981,8 +2000,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod 
h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2156,8 +2175,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -2170,8 +2189,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod 
h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2591,8 +2610,8 @@ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2605,6 +2624,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v5 v5.7.0 h1:dGKGylPlZ/jus2g1YqhhyzfH0gPy2R8/MYUpW/OslTY= gopkg.in/evanphx/json-patch.v5 v5.7.0/go.mod h1:/kvTRh1TVm5wuM6OkHxqXtE/1nUZZpihg29RtuIyfvk= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= 
+gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/go-playground/validator.v9 v9.31.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= @@ -2621,8 +2642,6 @@ gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYs gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -2655,7 +2674,6 @@ honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= k8s.io/api v0.0.0-20190725062911-6607c48751ae/go.mod h1:1O0xzX/RAtnm7l+5VEUxZ1ysO2ghatfq/OZED4zM9kA= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8= -k8s.io/api v0.20.10/go.mod h1:0kei3F6biGjtRQBo5dUeujq6Ji3UCh9aOSfp/THYd7I= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= @@ -2671,7 +2689,6 @@ k8s.io/apiextensions-apiserver v0.28.3/go.mod 
h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM k8s.io/apimachinery v0.0.0-20190719140911-bfcf53abc9f8/go.mod h1:sBJWIJZfxLhp7mRsRyuAE/NfKTr3kXGR1iaqg8O0gJo= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.10/go.mod h1:kQa//VOAwyVwJ2+L9kOREbsnryfsGSkSM1przND4+mw= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= @@ -2697,7 +2714,6 @@ k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpx k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.2/go.mod h1:pzFtCiwe/ASD0iV7ySMu8SYVJjCapNM9bjvk7ptpKh0= -k8s.io/component-base v0.20.10/go.mod h1:ZKOEin1xu68aJzxgzl5DZSp5J1IrjAOPlPN90/t6OI8= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= @@ -2786,8 +2802,6 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy v0.0.24 h1:yaswrAqidc2XdLK2GRacVEBb55g4dg91f/B7b0SYliY= -sigs.k8s.io/apiserver-network-proxy v0.0.24/go.mod h1:z/U9KltvRVSMttVl3cdQo8cPuXEjr+Qn3A5sUJR55XI= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.1 h1:suc8HHvU5zDtc5N+CiGu1YG/ipwvkv7vPB9OUMUVjW8= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.1/go.mod h1:o6mPitrlgUex6ZK5mp4VerN54SOVNzSWELwWnr8wep0= sigs.k8s.io/cluster-api v1.5.3 h1:TtxneDCps14sZ9bNr515ivBRMj9OwUE6mRUr6l7fSBA= diff --git a/hack/app-sre/saas_template.yaml b/hack/app-sre/saas_template.yaml index 0f7dc1d3e4..d167fa5d23 100644 --- a/hack/app-sre/saas_template.yaml +++ b/hack/app-sre/saas_template.yaml @@ -10,6 +10,7 @@ objects: metadata: creationTimestamp: null name: hypershift-control-plane + preemptionPolicy: Never value: 100000000 - apiVersion: scheduling.k8s.io/v1 description: This priority class should be used for hypershift etcd pods. @@ -17,6 +18,7 @@ objects: metadata: creationTimestamp: null name: hypershift-etcd + preemptionPolicy: Never value: 100002000 - apiVersion: scheduling.k8s.io/v1 description: This priority class should be used for hypershift control plane pods @@ -25,6 +27,7 @@ objects: metadata: creationTimestamp: null name: hypershift-api-critical + preemptionPolicy: Never value: 100001000 - apiVersion: scheduling.k8s.io/v1 description: This priority class is used for hypershift operator pods @@ -60,6 +63,12 @@ objects: - '*' verbs: - '*' + - apiGroups: + - certificates.hypershift.openshift.io + resources: + - '*' + verbs: + - '*' - apiGroups: - config.openshift.io resources: @@ -150,6 +159,7 @@ objects: - update - patch - delete + - deletecollection - apiGroups: - "" resources: @@ -262,6 +272,38 @@ objects: - validatingwebhookconfigurations verbs: - delete + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - patch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - update + - apiGroups: + - certificates.k8s.io + resources: + - signers + verbs: + - approve + - apiGroups: + - certificates.k8s.io + resources: + - signers + verbs: + - sign - 
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -385,8 +427,10 @@ objects: - --txt-owner-id=${EXTERNAL_DNS_TXT_OWNER_ID} - --label-filter=hypershift.openshift.io/route-visibility!=private - --interval=1m + - --txt-cache-interval=1h - --aws-zone-type=public - --aws-batch-change-interval=10s + - --aws-zones-cache-duration=1h command: - /external-dns env: @@ -414,6 +458,9 @@ objects: requests: cpu: 5m memory: 20Mi + securityContext: + privileged: false + readOnlyRootFilesystem: true volumeMounts: - mountPath: /etc/provider name: credentials @@ -461,6 +508,7 @@ objects: - --namespace=$(MY_NAMESPACE) - --pod-name=$(MY_NAME) - --metrics-addr=:9000 + - --enable-dedicated-request-serving-isolation=true - --enable-ocp-cluster-monitoring=false - --enable-ci-debug-output=false - --private-platform=AWS @@ -528,6 +576,8 @@ objects: capabilities: drop: - ALL + privileged: false + readOnlyRootFilesystem: true runAsUser: 1000 seccompProfile: type: RuntimeDefault @@ -553,6 +603,8 @@ objects: name: init-environment resources: {} securityContext: + privileged: false + readOnlyRootFilesystem: true runAsUser: 1000 volumeMounts: - mountPath: /var/run/ca-trust @@ -33829,6 +33881,227 @@ objects: plural: "" conditions: null storedVersions: null +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + name: certificaterevocationrequests.certificates.hypershift.openshift.io + spec: + group: certificates.hypershift.openshift.io + names: + kind: CertificateRevocationRequest + listKind: CertificateRevocationRequestList + plural: certificaterevocationrequests + shortNames: + - crr + - crrs + singular: certificaterevocationrequest + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: CertificateRevocationRequest defines the desired state of CertificateRevocationRequest. 
+ A request denotes the user's desire to revoke a signer certificate of + the class indicated in spec. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateRevocationRequestSpec defines the desired state + of CertificateRevocationRequest + properties: + signerClass: + description: SignerClass identifies the class of signer to revoke. + All the active signing CAs for the signer class will be revoked. + enum: + - customer-break-glass + - sre-break-glass + type: string + x-kubernetes-validations: + - message: signerClass is immutable + rule: self == oldSelf + required: + - signerClass + type: object + status: + description: CertificateRevocationRequestStatus defines the observed + state of CertificateRevocationRequest + properties: + conditions: + description: Conditions contain details about the various aspects + of certificate revocation. + items: + description: "Condition contains details for one aspect of the + current state of this API Resource. --- This struct is intended + for direct use as an array at the field path .status.conditions. + \ For example, \n type FooStatus struct{ // Represents the observations + of a foo's current state. 
// Known .status.conditions.type are: + \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type + // +patchStrategy=merge // +listType=map // +listMapKey=type + Conditions []metav1.Condition `json:\"conditions,omitempty\" + patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` + \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be + when the underlying condition changed. If that is not known, + then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if + .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be + useful (see .node.status.conditions), the ability to deconflict + is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + previousSigner: + description: PreviousSigner stores a reference to the previous signer + certificate. We require storing this data to ensure that we can + validate that the old signer is no longer valid before considering + revocation complete. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + revocationTimestamp: + description: RevocationTimestamp is the cut-off time for signing + CAs to be revoked. All certificates that are valid before this + time will be revoked; all re-generated certificates will not be + valid at or before this time. 
+ format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.12.0 + creationTimestamp: null + name: certificatesigningrequestapprovals.certificates.hypershift.openshift.io + spec: + group: certificates.hypershift.openshift.io + names: + kind: CertificateSigningRequestApproval + listKind: CertificateSigningRequestApprovalList + plural: certificatesigningrequestapprovals + shortNames: + - csra + - csras + singular: certificatesigningrequestapproval + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: CertificateSigningRequestApproval defines the desired state + of CertificateSigningRequestApproval + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. 
More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateSigningRequestApprovalSpec defines the desired + state of CertificateSigningRequestApproval + type: object + status: + description: CertificateSigningRequestApprovalStatus defines the observed + state of CertificateSigningRequestApproval + type: object + type: object + served: true + storage: true + status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null - apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -34199,6 +34472,74 @@ objects: plural: "" conditions: null storedVersions: null +- apiVersion: apiextensions.k8s.io/v1 + kind: CustomResourceDefinition + metadata: + annotations: + service.beta.openshift.io/inject-cabundle: "true" + creationTimestamp: null + name: certificatesigningrequestapprovals.hypershift.openshift.io + spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: operator + namespace: ${NAMESPACE} + path: /convert + port: 443 + conversionReviewVersions: + - v1beta2 + - v1beta1 + - v1alpha1 + group: hypershift.openshift.io + names: + kind: CertificateSigningRequestApproval + listKind: CertificateSigningRequestApprovalList + plural: certificatesigningrequestapprovals + shortNames: + - csra + - csras + singular: certificatesigningrequestapproval + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CertificateSigningRequestApproval defines the desired state + of CertificateSigningRequestApproval + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: CertificateSigningRequestApprovalSpec defines the desired + state of CertificateSigningRequestApproval + type: object + status: + description: CertificateSigningRequestApprovalStatus defines the observed + state of CertificateSigningRequestApproval + type: object + type: object + served: true + storage: true + status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null - apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: @@ -34816,7 +35157,8 @@ objects: items: minLength: 1 type: string - maxItems: 1 + maxItems: 10 + minItems: 1 type: array x-kubernetes-list-type: set issuerCertificateAuthority: @@ -36284,6 +36626,47 @@ objects: type: integer type: object type: object + operatorhub: + description: OperatorHub specifies the configuration for the + Operator Lifecycle Manager in the HostedCluster. This is only + configured at deployment time but the controller are not reconcilling + over it. The OperatorHub configuration will be constantly + reconciled if catalog placement is management, but only on + cluster creation otherwise. + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable + all the default hub sources. If this is true, a specific + entry in sources can be used to enable a default source. + If this is false, a specific entry in sources can be used + to disable or enable a default source. 
+ type: boolean + sources: + description: sources is the list of default hub sources + and their configuration. If the list is empty, it implies + that the default hub sources are enabled on the cluster + unless disableAllDefaultSources is true. If disableAllDefaultSources + is true and sources is not empty, the configuration present + in sources will take precedence. The list of default hub + sources and their current state will always be reflected + in the status block. + items: + description: HubSource is used to specify the hub source + and its configuration + properties: + disabled: + description: disabled is used to disable a default + hub source on cluster + type: boolean + name: + description: name is the name of one of the default + hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object proxy: description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. @@ -39012,7 +39395,8 @@ objects: items: minLength: 1 type: string - maxItems: 1 + maxItems: 10 + minItems: 1 type: array x-kubernetes-list-type: set issuerCertificateAuthority: @@ -40460,6 +40844,47 @@ objects: rule: '!has(self.tokenConfig) || !has(self.tokenConfig.accessTokenInactivityTimeout) || duration(self.tokenConfig.accessTokenInactivityTimeout).getSeconds() >= 300' + operatorhub: + description: OperatorHub specifies the configuration for the + Operator Lifecycle Manager in the HostedCluster. This is only + configured at deployment time but the controller are not reconcilling + over it. The OperatorHub configuration will be constantly + reconciled if catalog placement is management, but only on + cluster creation otherwise. + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable + all the default hub sources. If this is true, a specific + entry in sources can be used to enable a default source. 
+ If this is false, a specific entry in sources can be used + to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources + and their configuration. If the list is empty, it implies + that the default hub sources are enabled on the cluster + unless disableAllDefaultSources is true. If disableAllDefaultSources + is true and sources is not empty, the configuration present + in sources will take precedence. The list of default hub + sources and their current state will always be reflected + in the status block. + items: + description: HubSource is used to specify the hub source + and its configuration + properties: + disabled: + description: disabled is used to disable a default + hub source on cluster + type: boolean + name: + description: name is the name of one of the default + hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object proxy: description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. @@ -43127,7 +43552,8 @@ objects: items: minLength: 1 type: string - maxItems: 1 + maxItems: 10 + minItems: 1 type: array x-kubernetes-list-type: set issuerCertificateAuthority: @@ -44595,6 +45021,47 @@ objects: type: integer type: object type: object + operatorhub: + description: OperatorHub specifies the configuration for the + Operator Lifecycle Manager in the HostedCluster. This is only + configured at deployment time but the controller are not reconcilling + over it. The OperatorHub configuration will be constantly + reconciled if catalog placement is management, but only on + cluster creation otherwise. + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable + all the default hub sources. If this is true, a specific + entry in sources can be used to enable a default source. 
+ If this is false, a specific entry in sources can be used + to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources + and their configuration. If the list is empty, it implies + that the default hub sources are enabled on the cluster + unless disableAllDefaultSources is true. If disableAllDefaultSources + is true and sources is not empty, the configuration present + in sources will take precedence. The list of default hub + sources and their current state will always be reflected + in the status block. + items: + description: HubSource is used to specify the hub source + and its configuration + properties: + disabled: + description: disabled is used to disable a default + hub source on cluster + type: boolean + name: + description: name is the name of one of the default + hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object proxy: description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. @@ -44741,6 +45208,9 @@ objects: policy applied to critical control plane components. The default value is SingleReplica. type: string + x-kubernetes-validations: + - message: ControllerAvailabilityPolicy is immutable + rule: self == oldSelf dns: description: DNSSpec specifies the DNS configuration in the cluster. properties: @@ -47312,7 +47782,8 @@ objects: items: minLength: 1 type: string - maxItems: 1 + maxItems: 10 + minItems: 1 type: array x-kubernetes-list-type: set issuerCertificateAuthority: @@ -48760,6 +49231,47 @@ objects: rule: '!has(self.tokenConfig) || !has(self.tokenConfig.accessTokenInactivityTimeout) || duration(self.tokenConfig.accessTokenInactivityTimeout).getSeconds() >= 300' + operatorhub: + description: OperatorHub specifies the configuration for the + Operator Lifecycle Manager in the HostedCluster. This is only + configured at deployment time but the controller are not reconcilling + over it. 
The OperatorHub configuration will be constantly + reconciled if catalog placement is management, but only on + cluster creation otherwise. + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable + all the default hub sources. If this is true, a specific + entry in sources can be used to enable a default source. + If this is false, a specific entry in sources can be used + to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources + and their configuration. If the list is empty, it implies + that the default hub sources are enabled on the cluster + unless disableAllDefaultSources is true. If disableAllDefaultSources + is true and sources is not empty, the configuration present + in sources will take precedence. The list of default hub + sources and their current state will always be reflected + in the status block. + items: + description: HubSource is used to specify the hub source + and its configuration + properties: + disabled: + description: disabled is used to disable a default + hub source on cluster + type: boolean + name: + description: name is the name of one of the default + hub sources + maxLength: 253 + minLength: 1 + type: string + type: object + type: array + type: object proxy: description: Proxy holds cluster-wide information on how to configure default proxies for the cluster. @@ -48889,6 +49401,9 @@ objects: policy applied to critical control plane components. The default value is SingleReplica. type: string + x-kubernetes-validations: + - message: ControllerAvailabilityPolicy is immutable + rule: self == oldSelf dns: description: DNSSpec specifies the DNS configuration in the cluster. properties: @@ -51437,6 +51952,14 @@ objects: - Enable - Disable type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the kubevirt VirtualMachine to fit on a node. 
Selector + which must match a node''s labels for the VM to be scheduled + on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object rootVolume: default: persistent: @@ -52419,6 +52942,14 @@ objects: - Enable - Disable type: string + nodeSelector: + additionalProperties: + type: string + description: 'NodeSelector is a selector which must be true + for the kubevirt VirtualMachine to fit on a node. Selector + which must match a node''s labels for the VM to be scheduled + on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object rootVolume: default: persistent: diff --git a/hack/ci-test-e2e.sh b/hack/ci-test-e2e.sh index e8639c6bd2..61fad4885e 100755 --- a/hack/ci-test-e2e.sh +++ b/hack/ci-test-e2e.sh @@ -41,7 +41,7 @@ if [[ -n "${REQUEST_SERVING_COMPONENT_TEST}" ]]; then --e2e.management-cluster-name=$(cat "${SHARED_DIR}/management_cluster_name")" fi -bin/test-e2e \ +declare -a default_args=( -test.v \ -test.timeout=2h10m \ -test.run=${CI_TESTS_RUN} \ @@ -57,6 +57,21 @@ bin/test-e2e \ --e2e.additional-tags="expirationDate=$(date -d '4 hours' --iso=minutes --utc)" \ --e2e.aws-endpoint-access=PublicAndPrivate \ --e2e.external-dns-domain=service.ci.hypershift.devcluster.openshift.com \ - ${REQUEST_SERVING_COMPONENT_PARAMS} | tee /tmp/test_out & + ${REQUEST_SERVING_COMPONENT_PARAMS} +) + + +# We would like all end-to-end testing for HyperShift to use this script, so we can set flags centrally +# and provide the jUnit results, etc, for everyone in the same way. In order to do that, we need to allow +# each consumer to pass disjoint sets of flags to the end-to-end binary. We already accept one argument, +# the set of tests to run, so we will continue to honor the previous calling convention unless the caller +# is passing more flags. That allows us to default to the current behavior and let callers opt into the +# new paradigm over time. 
Once that migration is done, default_args will be removed. +declare -a args="$@" +if [[ "$#" -lt 2 ]]; then + args="${default_args[@]}" +fi + +bin/test-e2e ${args} | tee /tmp/test_out & wait $! diff --git a/hack/tools/git-hooks/cpo-containerfiles-in-sync.sh b/hack/tools/git-hooks/cpo-containerfiles-in-sync.sh new file mode 100755 index 0000000000..2f55d5b509 --- /dev/null +++ b/hack/tools/git-hooks/cpo-containerfiles-in-sync.sh @@ -0,0 +1,9 @@ +#!/bin/bash +echo >&2 "Processing " "$@" + +eval_cmd=("diff") +for f in "$@"; do + eval_cmd+=("<(sed -e '/^FROM /d' \"$f\")") +done + +eval "${eval_cmd[*]}" diff --git a/hack/tools/go.mod b/hack/tools/go.mod index 1ec785d2a8..028b3a839a 100644 --- a/hack/tools/go.mod +++ b/hack/tools/go.mod @@ -11,7 +11,7 @@ require ( sigs.k8s.io/controller-tools v0.12.0 ) -replace k8s.io/code-generator => github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231106164047-541b094e7aaa +replace k8s.io/code-generator => github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231218200749-2151937f4610 require ( cloud.google.com/go/compute v1.19.1 // indirect diff --git a/hack/tools/go.sum b/hack/tools/go.sum index 8e7ae22d80..4298183892 100644 --- a/hack/tools/go.sum +++ b/hack/tools/go.sum @@ -578,8 +578,8 @@ github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRM github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231106164047-541b094e7aaa h1:tOk+4KXFBmQP0D+k+UtU4aXq9jliOzu1Z3mwiDdDoYQ= -github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231106164047-541b094e7aaa/go.mod h1:C1oDIDCuN+hZsr8bZVFUp6dsOKvvMZ6jcmE4SFQn//8= +github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231218200749-2151937f4610 h1:DE8bxqW3Pz2tPP32l/NiZpx7PW1WIaLv2WaFuQHOOuA= 
+github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231218200749-2151937f4610/go.mod h1:C1oDIDCuN+hZsr8bZVFUp6dsOKvvMZ6jcmE4SFQn//8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= diff --git a/hack/tools/vendor/k8s.io/code-generator/kube_codegen.sh b/hack/tools/vendor/k8s.io/code-generator/kube_codegen.sh index 9882877cef..a030731088 100644 --- a/hack/tools/vendor/k8s.io/code-generator/kube_codegen.sh +++ b/hack/tools/vendor/k8s.io/code-generator/kube_codegen.sh @@ -586,6 +586,7 @@ function kube::codegen::gen_client() { --go-header-file "${boilerplate}" \ --output-base "${out_base}" \ --output-package "${out_pkg_root}/${applyconfig_subdir}" \ + --external-applyconfigurations k8s.io/apimachinery/pkg/apis/meta/v1.Condition:k8s.io/client-go/applyconfigurations/meta/v1 \ "${inputs[@]}" fi diff --git a/hack/tools/vendor/modules.txt b/hack/tools/vendor/modules.txt index 8303946d87..9c257c34d9 100644 --- a/hack/tools/vendor/modules.txt +++ b/hack/tools/vendor/modules.txt @@ -1172,7 +1172,7 @@ k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.28.3 => github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231106164047-541b094e7aaa +# k8s.io/code-generator v0.28.3 => github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231218200749-2151937f4610 ## explicit; go 1.21 k8s.io/code-generator k8s.io/code-generator/cmd/applyconfiguration-gen @@ -1290,4 +1290,4 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# k8s.io/code-generator => github.com/stevekuznetsov/k8s-code-generator v0.0.0-20231106164047-541b094e7aaa +# k8s.io/code-generator => github.com/stevekuznetsov/k8s-code-generator 
v0.0.0-20231218200749-2151937f4610 diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go index a263271b63..0a26f57301 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller.go @@ -39,6 +39,9 @@ import ( configv1 "github.com/openshift/api/config/v1" routev1 "github.com/openshift/api/route/v1" agentv1 "github.com/openshift/cluster-api-provider-agent/api/v1beta1" + cpomanifests "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" + controlplanepkioperatormanifests "github.com/openshift/hypershift/hypershift-operator/controllers/manifests/controlplanepkioperator" prometheusoperatorv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "gopkg.in/ini.v1" appsv1 "k8s.io/api/apps/v1" @@ -50,6 +53,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/intstr" @@ -64,6 +68,7 @@ import ( capikubevirt "sigs.k8s.io/cluster-api-provider-kubevirt/api/v1alpha1" capiv1 "sigs.k8s.io/cluster-api/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -90,6 +95,7 @@ import ( "github.com/openshift/hypershift/support/capabilities" "github.com/openshift/hypershift/support/certs" "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/globalconfig" 
"github.com/openshift/hypershift/support/images" "github.com/openshift/hypershift/support/infraid" "github.com/openshift/hypershift/support/metrics" @@ -119,6 +125,7 @@ const ( controlPlaneOperatorManagesMachineApprover = "io.openshift.hypershift.control-plane-operator-manages.cluster-machine-approver" controlPlaneOperatorManagesMachineAutoscaler = "io.openshift.hypershift.control-plane-operator-manages.cluster-autoscaler" controlPlaneOperatorAppliesManagementKASNetworkPolicyLabel = "io.openshift.hypershift.control-plane-operator-applies-management-kas-network-policy-label" + controlPlanePKIOperatorSignsCSRsLabel = "io.openshift.hypershift.control-plane-pki-operator-signs-csrs" useRestrictedPodSecurityLabel = "io.openshift.hypershift.restricted-psa" etcdEncKeyPostfix = "-etcd-encryption-key" @@ -141,8 +148,7 @@ type HostedClusterReconciler struct { // 2) The OCP version being deployed is the latest version supported by Hypershift HypershiftOperatorImage string - // ReleaseProvider looks up the OCP version for the release images in HostedClusters - ReleaseProvider releaseinfo.ProviderWithOpenShiftImageRegistryOverrides + OpenShiftImageRegistryOverrides map[string]string // SetDefaultSecurityContext is used to configure Security Context for containers SetDefaultSecurityContext bool @@ -161,13 +167,13 @@ type HostedClusterReconciler struct { OIDCStorageProviderS3BucketName string S3Client s3iface.S3API - ImageMetadataProvider hyperutil.ImageMetadataProvider - MetricsSet metrics.MetricsSet SREConfigHash string OperatorNamespace string + ReconcileMetadataProviders func(ctx context.Context, imgOverrides map[string]string) (releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) + overwriteReconcile func(ctx context.Context, req ctrl.Request, log logr.Logger, hcluster *hyperv1.HostedCluster) (ctrl.Result, error) now func() metav1.Time KubevirtInfraClients kvinfra.KubevirtInfraClientMap @@ -195,14 +201,14 @@ func (r 
*HostedClusterReconciler) SetupWithManager(mgr ctrl.Manager, createOrUpd // ignitionserver manifests packages. Since we're receiving watch events across // namespaces, the events are filtered to enqueue only those resources which // are annotated as being associated with a hostedcluster (using an annotation). - builder := ctrl.NewControllerManagedBy(mgr). - For(&hyperv1.HostedCluster{}). + bldr := ctrl.NewControllerManagedBy(mgr). + For(&hyperv1.HostedCluster{}, builder.WithPredicates(hyperutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). WithOptions(controller.Options{ RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 10*time.Second), MaxConcurrentReconciles: 10, }) for _, managedResource := range r.managedResources() { - builder.Watches(managedResource, handler.EnqueueRequestsFromMapFunc(enqueueHostedClustersFunc(metricsSet, operatorNamespace, mgr.GetClient()))) + bldr.Watches(managedResource, handler.EnqueueRequestsFromMapFunc(enqueueHostedClustersFunc(metricsSet, operatorNamespace, mgr.GetClient())), builder.WithPredicates(hyperutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))) } // Set based on SCC capability @@ -210,7 +216,9 @@ func (r *HostedClusterReconciler) SetupWithManager(mgr ctrl.Manager, createOrUpd // When SCC is not available (Kubernetes), we want to explicitly set a default (non-root) security context r.SetDefaultSecurityContext = !r.ManagementClusterCapabilities.Has(capabilities.CapabilitySecurityContextConstraint) - return builder.Complete(r) + r.ReconcileMetadataProviders = r.ReconcileMetadataProvidersImpl + + return bldr.Complete(r) } // managedResources are all the resources that are managed as childresources for a HostedCluster @@ -326,6 +334,12 @@ func (r *HostedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques return res, err } +func (r *HostedClusterReconciler) ReconcileMetadataProvidersImpl(ctx context.Context, imgOverrides map[string]string) 
(releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) { + releaseProvider, imageMetadataProvider, err := globalconfig.RenconcileMgmtImageRegistryOverrides(ctx, r.ManagementClusterCapabilities, r.Client, imgOverrides) + + return releaseProvider, imageMetadataProvider, err +} + func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Request, log logr.Logger, hcluster *hyperv1.HostedCluster) (ctrl.Result, error) { controlPlaneNamespace := manifests.HostedControlPlaneNamespaceObject(hcluster.Namespace, hcluster.Name) hcp := controlplaneoperator.HostedControlPlane(controlPlaneNamespace.Name, hcluster.Name) @@ -371,6 +385,34 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } } + // Bubble up AWSDefaultSecurityGroupDeleted condition from the hostedControlPlane. + // We set this condition even if the HC is being deleted, so we can report blocking objects on deletion. + { + if hcp != nil && hcp.DeletionTimestamp != nil { + freshCondition := &metav1.Condition{ + Type: string(hyperv1.AWSDefaultSecurityGroupDeleted), + Status: metav1.ConditionUnknown, + Reason: hyperv1.StatusUnknownReason, + ObservedGeneration: hcluster.Generation, + } + + securityGroupDeletionCondition := meta.FindStatusCondition(hcp.Status.Conditions, string(hyperv1.AWSDefaultSecurityGroupDeleted)) + if securityGroupDeletionCondition != nil { + freshCondition = securityGroupDeletionCondition + } + + oldCondition := meta.FindStatusCondition(hcluster.Status.Conditions, string(hyperv1.AWSDefaultSecurityGroupDeleted)) + if oldCondition == nil || oldCondition.Message != freshCondition.Message { + freshCondition.ObservedGeneration = hcluster.Generation + meta.SetStatusCondition(&hcluster.Status.Conditions, *freshCondition) + // Persist status updates + if err := r.Client.Status().Update(ctx, hcluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to update status: %w", err) + } + } + } + } + // Bubble up 
CloudResourcesDestroyed condition from the hostedControlPlane. // We set this condition even if the HC is being deleted, so we can construct SLIs for deletion times. { @@ -428,6 +470,30 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } } + // Once the deletion has occurred, we need to clean up cluster-wide resources + selector := client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(labels.Set{ + controlplanepkioperatormanifests.OwningHostedClusterNamespaceLabel: hcluster.Namespace, + controlplanepkioperatormanifests.OwningHostedClusterNameLabel: hcluster.Name, + })} + var crs rbacv1.ClusterRoleList + if err := r.List(ctx, &crs, selector); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list cluster roles: %w", err) + } + if len(crs.Items) > 0 { + if err := r.DeleteAllOf(ctx, &rbacv1.ClusterRole{}, selector); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to delete cluster roles: %w", err) + } + } + var crbs rbacv1.ClusterRoleBindingList + if err := r.List(ctx, &crbs, selector); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to list cluster role bindings: %w", err) + } + if len(crbs.Items) > 0 { + if err := r.DeleteAllOf(ctx, &rbacv1.ClusterRoleBinding{}, selector); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to delete cluster role bindings: %w", err) + } + } + if hcDestroyGracePeriod > 0 { if hostedClusterDestroyedCondition == nil { hostedClusterDestroyedCondition = &metav1.Condition{ @@ -469,12 +535,6 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques // Part zero: fix up conversion originalSpec := hcluster.Spec.DeepCopy() - createOrUpdate := r.createOrUpdate(req) - - if err = r.reconcileCLISecrets(ctx, createOrUpdate, hcluster); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to reconcile the CLI secrets: %w", err) - } - // Reconcile converted AWS roles. 
if hcluster.Spec.Platform.AWS != nil { if err := r.dereferenceAWSRoles(ctx, &hcluster.Spec.Platform.AWS.RolesRef, hcluster.Namespace); err != nil { @@ -492,6 +552,8 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } } + createOrUpdate := r.createOrUpdate(req) + // Reconcile platform defaults if err := r.reconcilePlatformDefaultSettings(ctx, hcluster, createOrUpdate, log); err != nil { return ctrl.Result{}, err @@ -518,6 +580,12 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } } + // Reconcile the ICSP/IDMS from the management cluster + releaseProvider, registryClientImageMetadataProvider, err := r.ReconcileMetadataProviders(ctx, r.OpenShiftImageRegistryOverrides) + if err != nil { + return ctrl.Result{}, err + } + // Set kubeadminPassword status { explicitOauthConfig := hcluster.Spec.Configuration != nil && hcluster.Spec.Configuration.OAuth != nil @@ -642,6 +710,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques hyperv1.ExternalDNSReachable, hyperv1.ValidHostedControlPlaneConfiguration, hyperv1.ValidReleaseInfo, + hyperv1.ValidIDPConfiguration, } for _, conditionType := range hcpConditions { @@ -927,7 +996,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques Type: string(hyperv1.ValidReleaseImage), ObservedGeneration: hcluster.Generation, } - err := r.validateReleaseImage(ctx, hcluster) + err := r.validateReleaseImage(ctx, hcluster, releaseProvider) if err != nil { condition.Status = metav1.ConditionFalse condition.Message = err.Error() @@ -946,7 +1015,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } } - releaseImage, err := r.lookupReleaseImage(ctx, hcluster) + releaseImage, err := r.lookupReleaseImage(ctx, hcluster, releaseProvider) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to lookup release image: %w", err) } @@ -964,7 +1033,7 @@ func (r *HostedClusterReconciler) 
reconcile(ctx context.Context, req ctrl.Reques if err != nil { condition.Status = metav1.ConditionFalse condition.Message = err.Error() - condition.Reason = "Blocked" + condition.Reason = hyperv1.BlockedReason } if progressing { condition.Status = metav1.ConditionTrue @@ -1008,6 +1077,10 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } + if err = r.reconcileCLISecrets(ctx, createOrUpdate, hcluster); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile the CLI secrets: %w", err) + } + // Set the infraID as Tag on all created AWS if err := r.reconcileAWSResourceTags(ctx, hcluster); err != nil { return ctrl.Result{}, err @@ -1053,28 +1126,30 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques if !ok { return ctrl.Result{}, fmt.Errorf("expected %s key in pull secret", corev1.DockerConfigJsonKey) } - controlPlaneOperatorImage, err := GetControlPlaneOperatorImage(ctx, hcluster, r.ReleaseProvider, r.HypershiftOperatorImage, pullSecretBytes) + controlPlaneOperatorImage, err := GetControlPlaneOperatorImage(ctx, hcluster, releaseProvider, r.HypershiftOperatorImage, pullSecretBytes) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to get controlPlaneOperatorImage: %w", err) } - controlPlaneOperatorImageMetadata, err := r.ImageMetadataProvider.ImageMetadata(ctx, controlPlaneOperatorImage, pullSecretBytes) + controlPlaneOperatorImageLabels, err := GetControlPlaneOperatorImageLabels(ctx, hcluster, controlPlaneOperatorImage, pullSecretBytes, registryClientImageMetadataProvider) if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to look up image metadata for %s: %w", controlPlaneOperatorImage, err) + return ctrl.Result{}, fmt.Errorf("failed to get controlPlaneOperatorImageLabels: %w", err) } + cpoHasUtilities := false - if _, hasLabel := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorSubcommandsLabel]; hasLabel { + if _, 
hasLabel := controlPlaneOperatorImageLabels[controlPlaneOperatorSubcommandsLabel]; hasLabel { cpoHasUtilities = true } utilitiesImage := controlPlaneOperatorImage if !cpoHasUtilities { utilitiesImage = r.HypershiftOperatorImage } - _, ignitionServerHasHealthzHandler := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[ignitionServerHealthzHandlerLabel] - _, controlplaneOperatorManagesIgnitionServer := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlplaneOperatorManagesIgnitionServerLabel] - _, controlPlaneOperatorManagesMachineAutoscaler := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineAutoscaler] - _, controlPlaneOperatorManagesMachineApprover := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorManagesMachineApprover] - _, controlPlaneOperatorAppliesManagementKASNetworkPolicyLabel := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[controlPlaneOperatorAppliesManagementKASNetworkPolicyLabel] - _, useRestrictedPSA := hyperutil.ImageLabels(controlPlaneOperatorImageMetadata)[useRestrictedPodSecurityLabel] + _, ignitionServerHasHealthzHandler := controlPlaneOperatorImageLabels[ignitionServerHealthzHandlerLabel] + _, controlplaneOperatorManagesIgnitionServer := controlPlaneOperatorImageLabels[controlplaneOperatorManagesIgnitionServerLabel] + _, controlPlaneOperatorManagesMachineAutoscaler := controlPlaneOperatorImageLabels[controlPlaneOperatorManagesMachineAutoscaler] + _, controlPlaneOperatorManagesMachineApprover := controlPlaneOperatorImageLabels[controlPlaneOperatorManagesMachineApprover] + _, controlPlaneOperatorAppliesManagementKASNetworkPolicyLabel := controlPlaneOperatorImageLabels[controlPlaneOperatorAppliesManagementKASNetworkPolicyLabel] + _, controlPlanePKIOperatorSignsCSRs := controlPlaneOperatorImageLabels[controlPlanePKIOperatorSignsCSRsLabel] + _, useRestrictedPSA := controlPlaneOperatorImageLabels[useRestrictedPodSecurityLabel] // Reconcile the hosted 
cluster namespace _, err = createOrUpdate(ctx, r.Client, controlPlaneNamespace, func() error { @@ -1114,7 +1189,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, fmt.Errorf("failed to reconcile namespace: %w", err) } - p, err := platform.GetPlatform(ctx, hcluster, r.ReleaseProvider, utilitiesImage, pullSecretBytes) + p, err := platform.GetPlatform(ctx, hcluster, releaseProvider, utilitiesImage, pullSecretBytes) if err != nil { return ctrl.Result{}, err } @@ -1339,37 +1414,6 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } } - // Reconcile the OIDCCAFile ConfigMap if - // * Authentication Type is OIDC - // * At least one OIDCProvider is set (only a single OIDCProvider is currently supported) - // * A configmap ref is set for the issuer CA - if !hyperutil.HCOAuthEnabled(hcluster) && - len(hcluster.Spec.Configuration.Authentication.OIDCProviders) != 0 && - hcluster.Spec.Configuration.Authentication.OIDCProviders[0].Issuer.CertificateAuthority.Name != "" { - caConfigMapName := hcluster.Spec.Configuration.Authentication.OIDCProviders[0].Issuer.CertificateAuthority.Name - var src corev1.ConfigMap - err = r.Client.Get(ctx, client.ObjectKey{Namespace: hcluster.Namespace, Name: caConfigMapName}, &src) - if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to get hostedcluster OIDCCAFile configmap %s: %w", caConfigMapName, err) - } - - dest := controlplaneoperator.OIDCCAConfigMap(controlPlaneNamespace.Name) - _, err = createOrUpdate(ctx, r.Client, dest, func() error { - srcData, srcHasData := src.Data["ca.crt"] - if !srcHasData { - return fmt.Errorf("hostedcluster OIDCCAFile configmap %q must have a ca.crt key", src.Name) - } - if dest.Data == nil { - dest.Data = map[string]string{} - } - dest.Data["ca.crt"] = srcData - return nil - }) - if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to reconcile controlplane OIDCCAFile configmap: %w", err) - } - } - // Reconcile 
the service account signing key if set if hcluster.Spec.ServiceAccountSigningKey != nil { if err := r.reconcileServiceAccountSigningKey(ctx, hcluster, controlPlaneNamespace.Name, createOrUpdate); err != nil { @@ -1455,9 +1499,13 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } // Reconcile the HostedControlPlane + isAutoscalingNeeded, err := r.isAutoscalingNeeded(ctx, hcluster) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to determine if autoscaler is needed: %w", err) + } hcp = controlplaneoperator.HostedControlPlane(controlPlaneNamespace.Name, hcluster.Name) _, err = createOrUpdate(ctx, r.Client, hcp, func() error { - return reconcileHostedControlPlane(hcp, hcluster) + return reconcileHostedControlPlane(hcp, hcluster, isAutoscalingNeeded) }) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile hostedcontrolplane: %w", err) @@ -1592,20 +1640,23 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } } - // Reconcile the CAPI manager components - err = r.reconcileCAPIManager(ctx, createOrUpdate, hcluster, hcp, pullSecretBytes) - if err != nil { - return ctrl.Result{}, fmt.Errorf("failed to reconcile capi manager: %w", err) - } + // Disable machine management components if enabled + if _, exists := hcluster.Annotations[hyperv1.DisableMachineManagement]; !exists { + // Reconcile the CAPI manager components + err = r.reconcileCAPIManager(ctx, createOrUpdate, hcluster, hcp, pullSecretBytes, &releaseProvider) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile capi manager: %w", err) + } - // Reconcile the CAPI provider components - if err = r.reconcileCAPIProvider(ctx, createOrUpdate, hcluster, hcp, capiProviderDeploymentSpec, p); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to reconcile capi provider: %w", err) + // Reconcile the CAPI provider components + if err = r.reconcileCAPIProvider(ctx, createOrUpdate, hcluster, hcp, 
capiProviderDeploymentSpec, p); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile capi provider: %w", err) + } } // Get release image version var releaseImageVersion semver.Version - releaseInfo, err := r.lookupReleaseImage(ctx, hcluster) + releaseInfo, err := r.lookupReleaseImage(ctx, hcluster, releaseProvider) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to lookup release image: %w", err) } @@ -1619,14 +1670,14 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques // TODO (alberto): drop this after dropping < 4.11 support. if !controlPlaneOperatorManagesMachineAutoscaler { // Reconcile the autoscaler. - err = r.reconcileAutoscaler(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes, releaseImageVersion) + err = r.reconcileAutoscaler(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes, releaseImageVersion, releaseProvider) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile autoscaler: %w", err) } } if !controlPlaneOperatorManagesMachineApprover { // Reconcile the machine approver. 
- if err = r.reconcileMachineApprover(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes, releaseImageVersion); err != nil { + if err = r.reconcileMachineApprover(ctx, createOrUpdate, hcluster, hcp, utilitiesImage, pullSecretBytes, releaseImageVersion, releaseProvider); err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile machine approver: %w", err) } } @@ -1647,14 +1698,22 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques } // Reconcile the control plane operator - err = r.reconcileControlPlaneOperator(ctx, createOrUpdate, hcluster, hcp, controlPlaneOperatorImage, utilitiesImage, defaultIngressDomain, cpoHasUtilities, openShiftTrustedCABundleConfigMapExists, r.CertRotationScale) + err = r.reconcileControlPlaneOperator(ctx, createOrUpdate, hcluster, hcp, controlPlaneOperatorImage, utilitiesImage, defaultIngressDomain, cpoHasUtilities, openShiftTrustedCABundleConfigMapExists, r.CertRotationScale, releaseImageVersion, releaseProvider) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile control plane operator: %w", err) } + if _, pkiDisabled := hcp.Annotations[hyperv1.DisablePKIReconciliationAnnotation]; controlPlanePKIOperatorSignsCSRs && !pkiDisabled { + // Reconcile the control plane PKI operator RBAC - the CPO does not have rights to do this itself + err = r.reconcileControlPlanePKIOperatorRBAC(ctx, createOrUpdate, hcluster) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to reconcile control plane PKI operator RBAC: %w", err) + } + } + // Reconcile the Ignition server if !controlplaneOperatorManagesIgnitionServer { - releaseInfo, err := r.lookupReleaseImage(ctx, hcluster) + releaseInfo, err := r.lookupReleaseImage(ctx, hcluster, releaseProvider) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to lookup release image: %w", err) } @@ -1666,8 +1725,8 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques hcp, defaultIngressDomain, 
ignitionServerHasHealthzHandler, - r.ReleaseProvider.GetRegistryOverrides(), - hyperutil.ConvertOpenShiftImageRegistryOverridesToCommandLineFlag(r.ReleaseProvider.GetOpenShiftImageRegistryOverrides()), + releaseProvider.GetRegistryOverrides(), + hyperutil.ConvertOpenShiftImageRegistryOverridesToCommandLineFlag(releaseProvider.GetOpenShiftImageRegistryOverrides()), r.ManagementClusterCapabilities.Has(capabilities.CapabilitySecurityContextConstraint), config.MutatingOwnerRefFromHCP(hcp, releaseImageVersion), ); err != nil { @@ -1714,7 +1773,7 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques // reconcileHostedControlPlane reconciles the given HostedControlPlane, which // will be mutated. -func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hyperv1.HostedCluster) error { +func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hyperv1.HostedCluster, isAutoscalingNeeded bool) error { hcp.Annotations = map[string]string{ HostedClusterAnnotation: client.ObjectKeyFromObject(hcluster).String(), } @@ -1732,17 +1791,24 @@ func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hype hyperutil.DebugDeploymentsAnnotation, hyperv1.DisableProfilingAnnotation, hyperv1.PrivateIngressControllerAnnotation, + hyperv1.IngressControllerLoadBalancerScope, hyperv1.CleanupCloudResourcesAnnotation, hyperv1.ControlPlanePriorityClass, hyperv1.APICriticalPriorityClass, hyperv1.EtcdPriorityClass, hyperv1.EnsureExistsPullSecretReconciliation, hyperv1.TopologyAnnotation, + hyperv1.DisableMachineManagement, hyperv1.CertifiedOperatorsCatalogImageAnnotation, hyperv1.CommunityOperatorsCatalogImageAnnotation, hyperv1.RedHatMarketplaceCatalogImageAnnotation, hyperv1.RedHatOperatorsCatalogImageAnnotation, hyperv1.OLMCatalogsISRegistryOverridesAnnotation, + hyperv1.KubeAPIServerGOGCAnnotation, + hyperv1.KubeAPIServerGOMemoryLimitAnnotation, + hyperv1.RequestServingNodeAdditionalSelectorAnnotation, + 
hyperv1.KubeAPIServerVerbosityLevelAnnotation, + hyperv1.ManagementPlatformAnnotation, } for _, key := range mirroredAnnotations { val, hasVal := hcluster.Annotations[key] @@ -1759,6 +1825,11 @@ func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hype } } + // Set the DisableClusterAutoscalerAnnotation if autoscaling is not needed + if !isAutoscalingNeeded { + hcp.Annotations[hyperv1.DisableClusterAutoscalerAnnotation] = "true" + } + hcp.Spec.Channel = hcluster.Spec.Channel hcp.Spec.ReleaseImage = hcluster.Spec.Release.Image if hcluster.Spec.ControlPlaneRelease != nil { @@ -1808,6 +1879,7 @@ func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hype hcp.Spec.OLMCatalogPlacement = hcluster.Spec.OLMCatalogPlacement hcp.Spec.Autoscaling = hcluster.Spec.Autoscaling hcp.Spec.NodeSelector = hcluster.Spec.NodeSelector + hcp.Spec.ImageContentSources = hcluster.Spec.ImageContentSources // Pass through Platform spec. hcp.Spec.Platform = *hcluster.Spec.Platform.DeepCopy() @@ -1827,7 +1899,7 @@ func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hype } // reconcileCAPIManager orchestrates orchestrates of all CAPI manager components. 
-func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, pullSecretBytes []byte) error { +func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, pullSecretBytes []byte, releaseProvider *releaseinfo.ProviderWithOpenShiftImageRegistryOverrides) error { controlPlaneNamespace := manifests.HostedControlPlaneNamespaceObject(hcluster.Namespace, hcluster.Name) err := r.Client.Get(ctx, client.ObjectKeyFromObject(controlPlaneNamespace), controlPlaneNamespace) if err != nil { @@ -1917,7 +1989,7 @@ func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, crea // Reconcile CAPI manager deployment var capiImage string if envImage := os.Getenv(images.CAPIEnvVar); len(envImage) > 0 { - version, err := hyperutil.GetPayloadVersion(ctx, r.ReleaseProvider, hcluster, pullSecretBytes) + version, err := hyperutil.GetPayloadVersion(ctx, *releaseProvider, hcluster, pullSecretBytes) if err != nil { return fmt.Errorf("failed to lookup payload version: %w", err) } @@ -1930,7 +2002,7 @@ func (r *HostedClusterReconciler) reconcileCAPIManager(ctx context.Context, crea capiImage = hcluster.Annotations[hyperv1.ClusterAPIManagerImage] } if capiImage == "" { - if capiImage, err = hyperutil.GetPayloadImage(ctx, r.ReleaseProvider, hcluster, ImageStreamCAPI, pullSecretBytes); err != nil { + if capiImage, err = hyperutil.GetPayloadImage(ctx, *releaseProvider, hcluster, ImageStreamCAPI, pullSecretBytes); err != nil { return fmt.Errorf("failed to retrieve capi image: %w", err) } } @@ -2004,7 +2076,7 @@ func (r *HostedClusterReconciler) reconcileCAPIProvider(ctx context.Context, cre // reconcileControlPlaneOperator orchestrates reconciliation of the control plane // operator components. 
-func (r *HostedClusterReconciler) reconcileControlPlaneOperator(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hostedControlPlane *hyperv1.HostedControlPlane, controlPlaneOperatorImage, utilitiesImage, defaultIngressDomain string, cpoHasUtilities bool, openShiftTrustedCABundleConfigMapExists bool, certRotationScale time.Duration) error { +func (r *HostedClusterReconciler) reconcileControlPlaneOperator(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hostedControlPlane *hyperv1.HostedControlPlane, controlPlaneOperatorImage, utilitiesImage, defaultIngressDomain string, cpoHasUtilities bool, openShiftTrustedCABundleConfigMapExists bool, certRotationScale time.Duration, releaseVersion semver.Version, releaseProvider releaseinfo.ProviderWithOpenShiftImageRegistryOverrides) error { controlPlaneNamespace := manifests.HostedControlPlaneNamespaceObject(hcluster.Namespace, hcluster.Name) err := r.Client.Get(ctx, client.ObjectKeyFromObject(controlPlaneNamespace), controlPlaneNamespace) if err != nil { @@ -2019,9 +2091,14 @@ func (r *HostedClusterReconciler) reconcileControlPlaneOperator(ctx context.Cont } // Reconcile operator role + // hostNetwork is required for CPO <= 4.13 + needsHostNetwork := false + if hcluster.Spec.Platform.Type != hyperv1.IBMCloudPlatform && releaseVersion.Major == 4 && releaseVersion.Minor <= 13 { + needsHostNetwork = true + } controlPlaneOperatorRole := controlplaneoperator.OperatorRole(controlPlaneNamespace.Name) _, err = createOrUpdate(ctx, r.Client, controlPlaneOperatorRole, func() error { - return reconcileControlPlaneOperatorRole(controlPlaneOperatorRole, r.EnableCVOManagementClusterMetricsAccess) + return reconcileControlPlaneOperatorRole(controlPlaneOperatorRole, r.EnableCVOManagementClusterMetricsAccess, needsHostNetwork) }) if err != nil { return fmt.Errorf("failed to reconcile controlplane operator role: %w", err) @@ -2088,8 +2165,8 @@ func (r 
*HostedClusterReconciler) reconcileControlPlaneOperator(ctx context.Cont r.SetDefaultSecurityContext, controlPlaneOperatorServiceAccount, r.EnableCIDebugOutput, - hyperutil.ConvertRegistryOverridesToCommandLineFlag(r.ReleaseProvider.GetRegistryOverrides()), - hyperutil.ConvertOpenShiftImageRegistryOverridesToCommandLineFlag(r.ReleaseProvider.GetOpenShiftImageRegistryOverrides()), + hyperutil.ConvertRegistryOverridesToCommandLineFlag(releaseProvider.GetRegistryOverrides()), + hyperutil.ConvertOpenShiftImageRegistryOverridesToCommandLineFlag(releaseProvider.GetOpenShiftImageRegistryOverrides()), defaultIngressDomain, cpoHasUtilities, r.MetricsSet, @@ -2128,6 +2205,49 @@ func (r *HostedClusterReconciler) reconcileControlPlaneOperator(ctx context.Cont return nil } +func (r *HostedClusterReconciler) reconcileControlPlanePKIOperatorRBAC(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster) error { + // We don't create this ServiceAccount, the CPO does, but we can reference it in RBAC before it's created as the system is eventually consistent + serviceAccount := cpomanifests.PKIOperatorServiceAccount(manifests.HostedControlPlaneNamespace(hcluster.Namespace, hcluster.Name)) + + // Reconcile controlplane PKI operator CSR approver cluster role + controlPlanePKIOperatorCSRApproverClusterRole := controlplanepkioperatormanifests.CSRApproverClusterRole(hcluster) + _, err := createOrUpdate(ctx, r.Client, controlPlanePKIOperatorCSRApproverClusterRole, func() error { + return controlplanepkioperatormanifests.ReconcileCSRApproverClusterRole(controlPlanePKIOperatorCSRApproverClusterRole, hcluster, certificates.CustomerBreakGlassSigner, certificates.SREBreakGlassSigner) + }) + if err != nil { + return fmt.Errorf("failed to reconcile controlplane PKI operator CSR approver cluster role: %w", err) + } + + // Reconcile controlplane PKI operator CSR approver cluster role binding + controlPlanePKIOperatorCSRApproverClusterRoleBinding := 
controlplanepkioperatormanifests.ClusterRoleBinding(hcluster, controlPlanePKIOperatorCSRApproverClusterRole) + _, err = createOrUpdate(ctx, r.Client, controlPlanePKIOperatorCSRApproverClusterRoleBinding, func() error { + return controlplanepkioperatormanifests.ReconcileClusterRoleBinding(controlPlanePKIOperatorCSRApproverClusterRoleBinding, controlPlanePKIOperatorCSRApproverClusterRole, serviceAccount) + }) + if err != nil { + return fmt.Errorf("failed to reconcile controlplane PKI operator CSR approver cluster role binding: %w", err) + } + + // Reconcile controlplane PKI operator CSR signer cluster role + controlPlanePKIOperatorCSRSignerClusterRole := controlplanepkioperatormanifests.CSRSignerClusterRole(hcluster) + _, err = createOrUpdate(ctx, r.Client, controlPlanePKIOperatorCSRSignerClusterRole, func() error { + return controlplanepkioperatormanifests.ReconcileCSRSignerClusterRole(controlPlanePKIOperatorCSRSignerClusterRole, hcluster, certificates.CustomerBreakGlassSigner, certificates.SREBreakGlassSigner) + }) + if err != nil { + return fmt.Errorf("failed to reconcile controlplane PKI operator CSR signer cluster role: %w", err) + } + + // Reconcile controlplane PKI operator CSR signer cluster role binding + controlPlanePKIOperatorCSRSignerClusterRoleBinding := controlplanepkioperatormanifests.ClusterRoleBinding(hcluster, controlPlanePKIOperatorCSRSignerClusterRole) + _, err = createOrUpdate(ctx, r.Client, controlPlanePKIOperatorCSRSignerClusterRoleBinding, func() error { + return controlplanepkioperatormanifests.ReconcileClusterRoleBinding(controlPlanePKIOperatorCSRSignerClusterRoleBinding, controlPlanePKIOperatorCSRSignerClusterRole, serviceAccount) + }) + if err != nil { + return fmt.Errorf("failed to reconcile controlplane PKI operator CSR signer cluster role binding: %w", err) + } + + return nil +} + // reconcileOpenShiftTrustedCAs checks for the existence of /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem, if it exists, // creates a new ConfigMap to be 
mounted in the CPO deployment utilizing the file func (r *HostedClusterReconciler) reconcileOpenShiftTrustedCAs(ctx context.Context, hostedControlPlane *hyperv1.HostedControlPlane) (bool, error) { @@ -2175,8 +2295,8 @@ func servicePublishingStrategyByType(hcp *hyperv1.HostedCluster, svcType hyperv1 // reconcileAutoscaler orchestrates reconciliation of autoscaler components using // both the HostedCluster and the HostedControlPlane which the autoscaler takes // inputs from. -func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte, releaseVersion semver.Version) error { - clusterAutoscalerImage, err := hyperutil.GetPayloadImage(ctx, r.ReleaseProvider, hcluster, ImageStreamAutoscalerImage, pullSecretBytes) +func (r *HostedClusterReconciler) reconcileAutoscaler(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte, releaseVersion semver.Version, releaseProvider releaseinfo.ProviderWithOpenShiftImageRegistryOverrides) error { + clusterAutoscalerImage, err := hyperutil.GetPayloadImage(ctx, releaseProvider, hcluster, ImageStreamAutoscalerImage, pullSecretBytes) if err != nil { return fmt.Errorf("failed to get image for cluster autoscaler: %w", err) } @@ -2199,7 +2319,7 @@ func (r *HostedClusterReconciler) reconcileCLISecrets(ctx context.Context, creat }) if err != nil { - return fmt.Errorf("failed to retrieve cli created secrets") + return fmt.Errorf("failed to retrieve cli created secrets: %v", err) } ownerRef := config.OwnerRefFrom(hcluster) @@ -2209,7 +2329,7 @@ func (r *HostedClusterReconciler) reconcileCLISecrets(ctx context.Context, creat return nil }) if err != nil { - return fmt.Errorf("failed to set secret's owner reference") + return fmt.Errorf("failed to set '%s' secret's 
owner reference: %v", secret.Name, err) } if res == controllerutil.OperationResultUpdated { log.Info("added owner reference of the Hosted cluster, to the secret", "secret", secret.Name) @@ -2255,6 +2375,35 @@ func GetControlPlaneOperatorImage(ctx context.Context, hc *hyperv1.HostedCluster return hypershiftOperatorImage, nil } +// GetControlPlaneOperatorImageLabels resolves the appropriate control plane +// operator image labels based on the following order of precedence (from most +// to least preferred): +// +// 1. The labels specified by the ControlPlaneOperatorImageLabelsAnnotation on the +// HostedCluster resource itself +// 2. The image labels in the medata of the image as resolved by GetControlPlaneOperatorImage +func GetControlPlaneOperatorImageLabels(ctx context.Context, hc *hyperv1.HostedCluster, controlPlaneOperatorImage string, pullSecret []byte, imageMetadataProvider hyperutil.ImageMetadataProvider) (map[string]string, error) { + if val, ok := hc.Annotations[hyperv1.ControlPlaneOperatorImageLabelsAnnotation]; ok { + annotatedLabels := map[string]string{} + rawLabels := strings.Split(val, ",") + for i, rawLabel := range rawLabels { + parts := strings.Split(rawLabel, "=") + if len(parts) != 2 { + return nil, fmt.Errorf("hosted cluster %s/%s annotation %d malformed: label %s not in key=value form", hc.Namespace, hc.Name, i, rawLabel) + } + annotatedLabels[parts[0]] = parts[1] + } + return annotatedLabels, nil + } + + controlPlaneOperatorImageMetadata, err := imageMetadataProvider.ImageMetadata(ctx, controlPlaneOperatorImage, pullSecret) + if err != nil { + return nil, fmt.Errorf("failed to look up image metadata for %s: %w", controlPlaneOperatorImage, err) + } + + return hyperutil.ImageLabels(controlPlaneOperatorImageMetadata), nil +} + func reconcileControlPlaneOperatorDeployment( deployment *appsv1.Deployment, openShiftTrustedCABundleConfigMapExists bool, @@ -2363,6 +2512,26 @@ func reconcileControlPlaneOperatorDeployment( Name: "CERT_ROTATION_SCALE", 
Value: certRotationScale.String(), }, + { + Name: "CONTROL_PLANE_OPERATOR_IMAGE", + Value: cpoImage, + }, + { + Name: "HOSTED_CLUSTER_CONFIG_OPERATOR_IMAGE", + Value: cpoImage, + }, + { + Name: "SOCKS5_PROXY_IMAGE", + Value: utilitiesImage, + }, + { + Name: "AVAILABILITY_PROBER_IMAGE", + Value: utilitiesImage, + }, + { + Name: "TOKEN_MINTER_IMAGE", + Value: utilitiesImage, + }, metrics.MetricsSetToEnv(metricsSet), }, Command: []string{"/usr/bin/control-plane-operator"}, @@ -2403,6 +2572,26 @@ func reconcileControlPlaneOperatorDeployment( }, } + if hc.Annotations[certs.CertificateValidityAnnotation] != "" { + certValidity := hc.Annotations[certs.CertificateValidityAnnotation] + deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: certs.CertificateValidityEnvVar, + Value: certValidity, + }, + ) + } + + if hc.Annotations[certs.CertificateRenewalAnnotation] != "" { + certRenewalPercentage := hc.Annotations[certs.CertificateRenewalAnnotation] + deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, + corev1.EnvVar{ + Name: certs.CertificateRenewalEnvVar, + Value: certRenewalPercentage, + }, + ) + } + if openShiftTrustedCABundleConfigMapExists { hyperutil.DeploymentAddOpenShiftTrustedCABundleConfigMap(deployment) } @@ -2554,13 +2743,18 @@ func reconcileControlPlaneOperatorDeployment( return nil } -func reconcileControlPlaneOperatorRole(role *rbacv1.Role, enableCVOManagementClusterMetricsAccess bool) error { +func reconcileControlPlaneOperatorRole(role *rbacv1.Role, enableCVOManagementClusterMetricsAccess bool, hostNetwork bool) error { role.Rules = []rbacv1.PolicyRule{ { APIGroups: []string{"hypershift.openshift.io"}, Resources: []string{"*"}, Verbs: []string{"*"}, }, + { + APIGroups: []string{"certificates.hypershift.openshift.io"}, + Resources: []string{"*"}, + Verbs: []string{"*"}, + }, { APIGroups: []string{ "bootstrap.cluster.x-k8s.io", @@ 
-2651,12 +2845,6 @@ func reconcileControlPlaneOperatorRole(role *rbacv1.Role, enableCVOManagementClu "watch", }, }, - { - APIGroups: []string{"security.openshift.io"}, - ResourceNames: []string{"hostnetwork"}, - Resources: []string{"securitycontextconstraints"}, - Verbs: []string{"use"}, - }, // This is needed for CPO to grant Autoscaler its RBAC policy. { APIGroups: []string{"cluster.x-k8s.io"}, @@ -2715,6 +2903,16 @@ func reconcileControlPlaneOperatorRole(role *rbacv1.Role, enableCVOManagementClu Verbs: []string{"get"}, }) } + if hostNetwork { + role.Rules = append(role.Rules, + rbacv1.PolicyRule{ + APIGroups: []string{"security.openshift.io"}, + ResourceNames: []string{"hostnetwork"}, + Resources: []string{"securitycontextconstraints"}, + Verbs: []string{"use"}, + }) + } + return nil } @@ -3197,6 +3395,13 @@ func computeClusterVersionStatus(clock clock.WithTickerAndDelayedExecution, hclu return hcp.Status.VersionStatus } + // The following code is legacy support to preserve + // compatability with older HostedControlPlane controllers, which + // may not be populating hcp.Status.VersionStatus. + // + // It is also used before the HostedControlPlane is created to bootstrap + // the ClusterVersionStatus. + releaseImage := hyperutil.HCControlPlaneReleaseImage(hcluster) // If there's no history, rebuild it from scratch. @@ -3219,9 +3424,20 @@ func computeClusterVersionStatus(clock clock.WithTickerAndDelayedExecution, hclu // Assume the previous status is still current. version := hcluster.Status.Version.DeepCopy() - // The following code is legacy support to preserve - // compatability with older HostedControlPlane controllers, which - // may not be populating hcp.Status.VersionStatus. + // If a new rollout is needed, update the desired version and prepend a new + // partial history entry to unblock rollouts. 
+ if releaseImage != hcluster.Status.Version.Desired.Image { + version.Desired.Image = releaseImage + version.ObservedGeneration = hcluster.Generation + // TODO: leaky + version.History = append([]configv1.UpdateHistory{ + { + State: configv1.PartialUpdate, + Image: releaseImage, + StartedTime: metav1.NewTime(clock.Now()), + }, + }, version.History...) + } // If the hosted control plane doesn't exist, there's no way to assess the // rollout so return early. @@ -3250,22 +3466,6 @@ func computeClusterVersionStatus(clock clock.WithTickerAndDelayedExecution, hclu version.History[0].CompletionTime = hcp.Status.LastReleaseImageTransitionTime.DeepCopy() } - // If a new rollout is needed, update the desired version and prepend a new - // partial history entry to unblock rollouts. - rolloutNeeded := releaseImage != hcluster.Status.Version.Desired.Image - if rolloutNeeded { - version.Desired.Image = releaseImage - version.ObservedGeneration = hcluster.Generation - // TODO: leaky - version.History = append([]configv1.UpdateHistory{ - { - State: configv1.PartialUpdate, - Image: releaseImage, - StartedTime: metav1.NewTime(clock.Now()), - }, - }, version.History...) - } - return version } @@ -3287,6 +3487,12 @@ func computeHostedClusterAvailability(hcluster *hyperv1.HostedCluster, hcp *hype hcpAvailableReason = hyperv1.AsExpectedReason hcpAvailableMessage = "The hosted control plane is available" } + } else { + // This catches and bubbles up validation errors that prevent the HCP from being created. 
+ hcProgressingCondition := meta.FindStatusCondition(hcluster.Status.Conditions, string(hyperv1.HostedClusterProgressing)) + if hcProgressingCondition != nil && hcProgressingCondition.Reason == hyperv1.BlockedReason { + hcpAvailableMessage = hcProgressingCondition.Message + } } return metav1.Condition{ @@ -3681,8 +3887,8 @@ func (r *HostedClusterReconciler) reconcileClusterPrometheusRBAC(ctx context.Con return nil } -func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte, releaseVersion semver.Version) error { - machineApproverImage, err := hyperutil.GetPayloadImage(ctx, r.ReleaseProvider, hcluster, ImageStreamClusterMachineApproverImage, pullSecretBytes) +func (r *HostedClusterReconciler) reconcileMachineApprover(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, utilitiesImage string, pullSecretBytes []byte, releaseVersion semver.Version, releaseProvider releaseinfo.ProviderWithOpenShiftImageRegistryOverrides) error { + machineApproverImage, err := hyperutil.GetPayloadImage(ctx, releaseProvider, hcluster, ImageStreamClusterMachineApproverImage, pullSecretBytes) if err != nil { return fmt.Errorf("failed to get image for machine approver: %w", err) } @@ -3769,7 +3975,7 @@ func (r *HostedClusterReconciler) validateUserCAConfigMaps(ctx context.Context, return errs } -func (r *HostedClusterReconciler) validateReleaseImage(ctx context.Context, hc *hyperv1.HostedCluster) error { +func (r *HostedClusterReconciler) validateReleaseImage(ctx context.Context, hc *hyperv1.HostedCluster, releaseProvider releaseinfo.ProviderWithOpenShiftImageRegistryOverrides) error { if _, exists := hc.Annotations[hyperv1.SkipReleaseImageValidation]; exists { return nil } @@ -3782,7 +3988,7 @@ func (r *HostedClusterReconciler) 
validateReleaseImage(ctx context.Context, hc * return fmt.Errorf("expected %s key in pull secret", corev1.DockerConfigJsonKey) } - releaseInfo, err := r.lookupReleaseImage(ctx, hc) + releaseInfo, err := r.lookupReleaseImage(ctx, hc, releaseProvider) if err != nil { return fmt.Errorf("failed to lookup release image: %w", err) } @@ -3793,7 +3999,7 @@ func (r *HostedClusterReconciler) validateReleaseImage(ctx context.Context, hc * var currentVersion *semver.Version if hc.Status.Version != nil && hc.Status.Version.Desired.Image != hyperutil.HCControlPlaneReleaseImage(hc) { - releaseInfo, err := r.ReleaseProvider.Lookup(ctx, hc.Status.Version.Desired.Image, pullSecretBytes) + releaseInfo, err := releaseProvider.Lookup(ctx, hc.Status.Version.Desired.Image, pullSecretBytes) if err != nil { return fmt.Errorf("failed to lookup release image: %w", err) } @@ -3813,7 +4019,7 @@ func isProgressing(hc *hyperv1.HostedCluster, releaseImage *releaseinfo.ReleaseI switch string(condition.Type) { case string(hyperv1.SupportedHostedCluster), string(hyperv1.ValidHostedClusterConfiguration), string(hyperv1.ValidReleaseImage), string(hyperv1.ReconciliationActive): if condition.Status == metav1.ConditionFalse { - return false, fmt.Errorf("%s condition is false", string(condition.Type)) + return false, fmt.Errorf("%s condition is false: %s", string(condition.Type), condition.Message) } case string(hyperv1.ClusterVersionUpgradeable): _, _, err := isUpgrading(hc, releaseImage) @@ -4442,7 +4648,7 @@ func (r *HostedClusterReconciler) reconcileAWSSubnets(ctx context.Context, creat return nil } -func (r *HostedClusterReconciler) lookupReleaseImage(ctx context.Context, hcluster *hyperv1.HostedCluster) (*releaseinfo.ReleaseImage, error) { +func (r *HostedClusterReconciler) lookupReleaseImage(ctx context.Context, hcluster *hyperv1.HostedCluster, releaseProvider releaseinfo.ProviderWithOpenShiftImageRegistryOverrides) (*releaseinfo.ReleaseImage, error) { var pullSecret corev1.Secret if err := 
r.Client.Get(ctx, types.NamespacedName{Namespace: hcluster.Namespace, Name: hcluster.Spec.PullSecret.Name}, &pullSecret); err != nil { return nil, fmt.Errorf("failed to get pull secret: %w", err) @@ -4451,7 +4657,20 @@ func (r *HostedClusterReconciler) lookupReleaseImage(ctx context.Context, hclust if !ok { return nil, fmt.Errorf("expected %s key in pull secret", corev1.DockerConfigJsonKey) } - return r.ReleaseProvider.Lookup(ctx, hyperutil.HCControlPlaneReleaseImage(hcluster), pullSecretBytes) + return releaseProvider.Lookup(ctx, hyperutil.HCControlPlaneReleaseImage(hcluster), pullSecretBytes) +} + +func (r *HostedClusterReconciler) isAutoscalingNeeded(ctx context.Context, hcluster *hyperv1.HostedCluster) (bool, error) { + nodePools, err := listNodePools(ctx, r.Client, hcluster.Namespace, hcluster.Name) + if err != nil { + return false, fmt.Errorf("failed to get nodePools by cluster name for cluster %q: %w", hcluster.Name, err) + } + for _, nodePool := range nodePools { + if nodePool.Spec.AutoScaling != nil { + return true, nil + } + } + return false, nil } // isUpgrading returns @@ -4732,6 +4951,24 @@ func (r *HostedClusterReconciler) reconcileKubevirtPlatformDefaultSettings(ctx c } } + // Reconcile management infrastructure annotation + if _, exists := hc.Annotations[hyperv1.ManagementPlatformAnnotation]; !exists { + if hc.Annotations == nil { + hc.Annotations = map[string]string{} + } + mgmtInfraKey := client.ObjectKey{Name: "cluster"} + mgmtInfra := &configv1.Infrastructure{} + + if err := r.Get(ctx, mgmtInfraKey, mgmtInfra); err != nil { + return fmt.Errorf("failed to get infrastructure.config.openshift.io status: %w", err) + } + mgmtPlatformType := mgmtInfra.Status.PlatformStatus.Type + hc.Annotations[hyperv1.ManagementPlatformAnnotation] = string(mgmtPlatformType) + if err := r.Client.Update(ctx, hc); err != nil { + return fmt.Errorf("failed to update hostedcluster %s annotation: %w", hc.Name, err) + } + } + return nil } diff --git 
a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go index c5605de206..1466cfcfd3 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_controller_test.go @@ -4,11 +4,12 @@ import ( "context" "errors" "fmt" - "github.com/openshift/hypershift/cmd/util" "reflect" "testing" "time" + "github.com/openshift/hypershift/cmd/util" + "github.com/go-logr/logr" "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" @@ -27,9 +28,11 @@ import ( "github.com/openshift/hypershift/support/capabilities" fakecapabilities "github.com/openshift/hypershift/support/capabilities/fake" "github.com/openshift/hypershift/support/config" + "github.com/openshift/hypershift/support/releaseinfo" fakereleaseprovider "github.com/openshift/hypershift/support/releaseinfo/fake" "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client" "github.com/openshift/hypershift/support/upsert" + hyperutil "github.com/openshift/hypershift/support/util" "github.com/openshift/hypershift/support/util/fakeimagemetadataprovider" "go.uber.org/zap/zapcore" appsv1 "k8s.io/api/apps/v1" @@ -143,7 +146,10 @@ func TestHasBeenAvailable(t *testing.T) { CertRotationScale: 24 * time.Hour, createOrUpdate: func(reconcile.Request) upsert.CreateOrUpdateFN { return ctrl.CreateOrUpdate }, ManagementClusterCapabilities: &fakecapabilities.FakeSupportNoCapabilities{}, - now: func() metav1.Time { return reconcilerNow }, + ReconcileMetadataProviders: func(ctx context.Context, imgOverrides map[string]string) (releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) { + return &fakereleaseprovider.FakeReleaseProvider{}, &fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{}}, nil + }, + now: func() metav1.Time { return 
reconcilerNow }, } ctx := context.Background() @@ -260,7 +266,7 @@ func TestReconcileHostedControlPlaneUpgrades(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { updated := test.ControlPlane.DeepCopy() - err := reconcileHostedControlPlane(updated, &test.Cluster) + err := reconcileHostedControlPlane(updated, &test.Cluster, true) if err != nil { t.Error(err) } @@ -392,7 +398,7 @@ func TestReconcileHostedControlPlaneAPINetwork(t *testing.T) { hostedCluster := &hyperv1.HostedCluster{} hostedCluster.Spec.Networking.APIServer = test.networking hostedControlPlane := &hyperv1.HostedControlPlane{} - err := reconcileHostedControlPlane(hostedControlPlane, hostedCluster) + err := reconcileHostedControlPlane(hostedControlPlane, hostedCluster, true) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -450,7 +456,7 @@ func TestReconcileHostedControlPlaneConfiguration(t *testing.T) { hostedControlPlane := &hyperv1.HostedControlPlane{} g := NewGomegaWithT(t) - err := reconcileHostedControlPlane(hostedControlPlane, hostedCluster) + err := reconcileHostedControlPlane(hostedControlPlane, hostedCluster, true) g.Expect(err).ToNot(HaveOccurred()) // DeepEqual to check that all ClusterConfiguration fields are deep copied to HostedControlPlane @@ -1031,6 +1037,16 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { }, }, }, + &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Status: configv1.InfrastructureStatus{ + PlatformStatus: &configv1.PlatformStatus{ + Type: configv1.AWSPlatformType, + }, + }, + }, } for _, cluster := range hostedClusters { cluster.Spec.Services = []hyperv1.ServicePublishingStrategyMapping{ @@ -1058,10 +1074,11 @@ func TestHostedClusterWatchesEverythingItCreates(t *testing.T) { capabilities.CapabilityIngress, capabilities.CapabilityProxy, ), - createOrUpdate: func(reconcile.Request) upsert.CreateOrUpdateFN { return ctrl.CreateOrUpdate }, - ReleaseProvider: 
&fakereleaseprovider.FakeReleaseProvider{}, - ImageMetadataProvider: &fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{}}, - now: metav1.Now, + createOrUpdate: func(reconcile.Request) upsert.CreateOrUpdateFN { return ctrl.CreateOrUpdate }, + ReconcileMetadataProviders: func(ctx context.Context, imgOverrides map[string]string) (releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) { + return &fakereleaseprovider.FakeReleaseProvider{}, &fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{}}, nil + }, + now: metav1.Now, } r.KubevirtInfraClients = kvinfra.NewMockKubevirtInfraClientMap(&createTypeTrackingClient{Client: fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(objects...).Build()}, @@ -1970,30 +1987,40 @@ func TestValidateReleaseImage(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + g := NewGomegaWithT(t) r := &HostedClusterReconciler{ CertRotationScale: 24 * time.Hour, Client: fake.NewClientBuilder().WithObjects(tc.other...).Build(), - ReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{ - ImageVersion: map[string]string{ - "image-4.7.0": "4.7.0", - "image-4.9.0": "4.9.0", - "image-4.10.0": "4.10.0", - "image-4.11.0": "4.11.0", - "image-4.11.1": "4.11.1", - "image-4.12.0": "4.12.0", - "image-4.13.0": "4.13.0", - "image-4.14.0": "4.14.0", - }, + ReconcileMetadataProviders: func(ctx context.Context, imgOverrides map[string]string) (releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) { + return &fakereleaseprovider.FakeReleaseProvider{ + ImageVersion: map[string]string{ + "image-4.7.0": "4.7.0", + "image-4.9.0": "4.9.0", + "image-4.10.0": "4.10.0", + "image-4.11.0": "4.11.0", + "image-4.11.1": "4.11.1", + "image-4.12.0": "4.12.0", + "image-4.13.0": "4.13.0", + "image-4.14.0": "4.14.0", + "image-4.15.0": "4.15.0", + "image-4.16.0": "4.16.0", + 
}, + }, + &fakeimagemetadataprovider.FakeImageMetadataProvider{ + Result: &dockerv1client.DockerImageConfig{}, + }, + nil }, } ctx := context.Background() - actual := r.validateReleaseImage(ctx, tc.hostedCluster) + releaseProvider, _, err := r.ReconcileMetadataProviders(ctx, nil) + g.Expect(err).ToNot(HaveOccurred()) + actual := r.validateReleaseImage(ctx, tc.hostedCluster, releaseProvider) if diff := cmp.Diff(actual, tc.expectedResult, equateErrorMessage); diff != "" { t.Errorf("actual validation result differs from expected: %s", diff) } if tc.expectedNotFoundError { - g := NewGomegaWithT(t) g.Expect(errors2.IsNotFound(actual)).To(BeTrue()) } }) @@ -2313,17 +2340,27 @@ func TestIsUpgradeable(t *testing.T) { r := &HostedClusterReconciler{ CertRotationScale: 24 * time.Hour, Client: fake.NewClientBuilder().WithObjects(objs...).Build(), - ReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{ - ImageVersion: map[string]string{ - "image-4.12": "4.12.0", - "image-4.12.1": "4.12.1", - "image-4.13": "4.13.0", - }, + ReconcileMetadataProviders: func(ctx context.Context, imgOverrides map[string]string) (releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) { + return &fakereleaseprovider.FakeReleaseProvider{ + ImageVersion: map[string]string{ + "image-4.12": "4.12.0", + "image-4.12.1": "4.12.1", + "image-4.13": "4.14.0", + }, + }, + &fakeimagemetadataprovider.FakeImageMetadataProvider{ + Result: &dockerv1client.DockerImageConfig{}, + }, + nil }, } t.Run(test.name, func(t *testing.T) { - releaseImage, err := r.lookupReleaseImage(context.TODO(), test.hc) + ctx := context.Background() + g := NewGomegaWithT(t) + releaseProvider, _, err := r.ReconcileMetadataProviders(ctx, nil) + g.Expect(err).ToNot(HaveOccurred()) + releaseImage, err := r.lookupReleaseImage(context.TODO(), test.hc, releaseProvider) if err != nil { t.Errorf("isUpgrading() internal err = %v", err) } @@ -2661,16 +2698,26 @@ func TestIsProgressing(t *testing.T) { r := 
&HostedClusterReconciler{ CertRotationScale: 24 * time.Hour, Client: fake.NewClientBuilder().WithObjects(objs...).Build(), - ReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{ - ImageVersion: map[string]string{ - "release-1.2": "1.2.0", - "release-1.3": "1.3.0", - }, + ReconcileMetadataProviders: func(ctx context.Context, imgOverrides map[string]string) (releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) { + return &fakereleaseprovider.FakeReleaseProvider{ + ImageVersion: map[string]string{ + "release-1.2": "1.2.0", + "release-1.3": "1.3.0", + }, + }, + &fakeimagemetadataprovider.FakeImageMetadataProvider{ + Result: &dockerv1client.DockerImageConfig{}, + }, + nil }, } t.Run(tt.name, func(t *testing.T) { - releaseImage, err := r.lookupReleaseImage(context.TODO(), tt.hc) + ctx := context.Background() + g := NewGomegaWithT(t) + releaseProvider, _, err := r.ReconcileMetadataProviders(ctx, nil) + g.Expect(err).ToNot(HaveOccurred()) + releaseImage, err := r.lookupReleaseImage(context.TODO(), tt.hc, releaseProvider) if err != nil { t.Errorf("isProgressing() internal err = %v", err) } @@ -3500,6 +3547,17 @@ func TestKubevirtETCDEncKey(t *testing.T) { } { t.Run(testCase.name, func(tt *testing.T) { testCase.objects = append(testCase.objects, testCase.hc) + infra := &configv1.Infrastructure{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Status: configv1.InfrastructureStatus{ + PlatformStatus: &configv1.PlatformStatus{ + Type: configv1.KubevirtPlatformType, + }, + }, + } + testCase.objects = append(testCase.objects, infra) client := &createTypeTrackingClient{Client: fake.NewClientBuilder(). WithScheme(api.Scheme). WithObjects(testCase.objects...). 
@@ -3515,10 +3573,11 @@ func TestKubevirtETCDEncKey(t *testing.T) { capabilities.CapabilityIngress, capabilities.CapabilityProxy, ), - createOrUpdate: func(reconcile.Request) upsert.CreateOrUpdateFN { return ctrl.CreateOrUpdate }, - ReleaseProvider: &fakereleaseprovider.FakeReleaseProvider{}, - ImageMetadataProvider: &fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{}}, - now: metav1.Now, + createOrUpdate: func(reconcile.Request) upsert.CreateOrUpdateFN { return ctrl.CreateOrUpdate }, + ReconcileMetadataProviders: func(ctx context.Context, imgOverrides map[string]string) (releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) { + return &fakereleaseprovider.FakeReleaseProvider{}, &fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{}}, nil + }, + now: metav1.Now, } if _, err := r.Reconcile(context.Background(), reconcile.Request{NamespacedName: types.NamespacedName{Namespace: testCase.hc.Namespace, Name: testCase.hc.Name}}); err != nil { diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook.go index b5f28a9083..1c3da04296 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook.go @@ -2,34 +2,24 @@ package hostedcluster import ( "context" - "errors" "fmt" - "time" - "github.com/blang/semver" jsonpatch "github.com/evanphx/json-patch/v5" "github.com/go-logr/logr" apiexample "github.com/openshift/hypershift/api/fixtures" - "golang.org/x/sync/errgroup" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" utilrand "k8s.io/apimachinery/pkg/util/rand" ctrl "sigs.k8s.io/controller-runtime" 
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" - "github.com/openshift/hypershift/hypershift-operator/controllers/manifests" - "github.com/openshift/hypershift/kubevirtexternalinfra" "github.com/openshift/hypershift/support/supportedversion" hyperutil "github.com/openshift/hypershift/support/util" ) -const versionLabel = "io.openshift.release" - type hostedClusterDefaulter struct { } @@ -130,12 +120,6 @@ func (defaulter *nodePoolDefaulter) Default(ctx context.Context, obj runtime.Obj // SetupWebhookWithManager sets up HostedCluster webhooks. func SetupWebhookWithManager(mgr ctrl.Manager, imageMetaDataProvider *hyperutil.RegistryClientImageMetadataProvider, logger logr.Logger) error { - kvValidator = &kubevirtClusterValidator{ - client: mgr.GetClient(), - clientMap: kubevirtexternalinfra.NewKubevirtInfraClientMap(), - imageMetaDataProvider: imageMetaDataProvider, - } - err := ctrl.NewWebhookManagedBy(mgr). For(&hyperv1.HostedCluster{}). WithDefaulter(&hostedClusterDefaulter{}). @@ -147,7 +131,7 @@ func SetupWebhookWithManager(mgr ctrl.Manager, imageMetaDataProvider *hyperutil. err = ctrl.NewWebhookManagedBy(mgr). For(&hyperv1.NodePool{}). WithDefaulter(&nodePoolDefaulter{client: mgr.GetClient()}). - WithValidator(newNodePoolValidator(mgr.GetClient(), logger)). + WithValidator(newNodePoolValidator(logger)). Complete() if err != nil { return fmt.Errorf("unable to register nodepool webhook: %w", err) @@ -161,8 +145,6 @@ func SetupWebhookWithManager(mgr ctrl.Manager, imageMetaDataProvider *hyperutil. return nil } -var kvValidator *kubevirtClusterValidator - var _ admission.CustomValidator = (*hostedClusterValidator)(nil) type hostedClusterValidator struct{} @@ -212,7 +194,7 @@ func (v hostedClusterValidator) validateCreateKubevirtHostedCluster(ctx context. 
return nil, err } - return kvValidator.validateCreate(ctx, hc, hyperutil.HCControlPlaneReleaseImage(hc)) + return nil, nil } func (v hostedClusterValidator) validateUpdateKubevirtHostedCluster(ctx context.Context, oldHC, newHC *hyperv1.HostedCluster) error { @@ -221,32 +203,15 @@ func (v hostedClusterValidator) validateUpdateKubevirtHostedCluster(ctx context. return err } - newReleaseImage := hyperutil.HCControlPlaneReleaseImage(newHC) - - if newReleaseImage != hyperutil.HCControlPlaneReleaseImage(oldHC) { - if _, isTimeout := ctx.Deadline(); !isTimeout { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Second*10) - defer cancel() - } - - err = kvValidator.validateReleaseImage(ctx, newHC, newReleaseImage) - if err != nil { - return err - } - } - return nil } type nodePoolValidator struct { - client client.Client logger logr.Logger } -func newNodePoolValidator(client client.Client, logger logr.Logger) *nodePoolValidator { +func newNodePoolValidator(logger logr.Logger) *nodePoolValidator { return &nodePoolValidator{ - client: client, logger: logr.New(logger.GetSink()).WithName("nodePoolValidator"), } } @@ -297,12 +262,7 @@ func (v nodePoolValidator) validateCreateKubevirtNodePool(ctx context.Context, n return nil, err } - hc := v.getHostedClusterOrSkip(ctx, np) - if hc == nil { - return nil, nil - } - - return kvValidator.validateCreate(ctx, hc, np.Spec.Release.Image) + return nil, nil } func (v nodePoolValidator) validateUpdateKubevirtNodePool(ctx context.Context, oldNP, newNP *hyperv1.NodePool) error { @@ -310,129 +270,9 @@ func (v nodePoolValidator) validateUpdateKubevirtNodePool(ctx context.Context, o if err != nil { return err } - - if oldNP.Spec.Release.Image != newNP.Spec.Release.Image { - if _, isTimeout := ctx.Deadline(); !isTimeout { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Second*10) - defer cancel() - } - hc := v.getHostedClusterOrSkip(ctx, newNP) - if hc == nil { - return nil - } - - err = 
kvValidator.validateReleaseImage(ctx, hc, newNP.Spec.Release.Image) - if err != nil { - return err - } - } - return nil } -func (v nodePoolValidator) getHostedClusterOrSkip(ctx context.Context, np *hyperv1.NodePool) *hyperv1.HostedCluster { - hc := &hyperv1.HostedCluster{} - err := v.client.Get(ctx, client.ObjectKey{Name: np.Spec.ClusterName, Namespace: np.Namespace}, hc) - if err != nil { - v.logger.Error(err, "can't find HostedCluster; skipping NodePool validation", - "HostedCluster", np.Spec.ClusterName, - "NodePool", np.Name, - "namespace", np.Namespace) - return nil - } - - return hc -} - -type kubevirtClusterValidator struct { - client client.Client - clientMap kubevirtexternalinfra.KubevirtInfraClientMap - imageMetaDataProvider hyperutil.ImageMetadataProvider -} - -func (v *kubevirtClusterValidator) validateCreate(ctx context.Context, hc *hyperv1.HostedCluster, releaseImage string) (admission.Warnings, error) { - if v == nil { - return nil, errors.New("kubevirt validator is not initialized") // should never happen - } - - if hc.Spec.Platform.Kubevirt == nil { - return nil, fmt.Errorf("the spec.platform.kubevirt field is missing in the HostedCluster resource") - } - - controlPlaneNamespace := manifests.HostedControlPlaneNamespace(hc.Namespace, hc.Name) - cl, err := v.clientMap.DiscoverKubevirtClusterClient(ctx, v.client, hc.Spec.InfraID, hc.Spec.Platform.Kubevirt.Credentials, controlPlaneNamespace, hc.Namespace) - if err != nil { - return nil, fmt.Errorf("failed to connect external infra cluster; %w", err) - } - - if _, isTimeout := ctx.Deadline(); !isTimeout { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Second*10) - defer cancel() - } - - eg, egCtx := errgroup.WithContext(ctx) - - eg.Go(func() error { - return v.validateReleaseImage(egCtx, hc, releaseImage) - }) - - eg.Go(func() error { - return kubevirtexternalinfra.ValidateClusterVersions(egCtx, cl) - }) - - return nil, eg.Wait() -} - -func (v *kubevirtClusterValidator) 
validateReleaseImage(ctx context.Context, hc *hyperv1.HostedCluster, releaseImage string) error { - if _, exists := hc.Annotations[hyperv1.SkipReleaseImageValidation]; exists { - return nil - } - - version, err := v.getImageVersion(ctx, hc, releaseImage) - if err != nil { - return err - } - - if version == nil { - return nil - } - - minSupportedVersion := supportedversion.GetMinSupportedVersion(hc) - - return supportedversion.IsValidReleaseVersion(version, nil, &supportedversion.LatestSupportedVersion, &minSupportedVersion, hc.Spec.Networking.NetworkType, hc.Spec.Platform.Type) -} - -func (v *kubevirtClusterValidator) getImageVersion(ctx context.Context, hc *hyperv1.HostedCluster, releaseImage string) (*semver.Version, error) { - var pullSecret corev1.Secret - err := v.client.Get(ctx, types.NamespacedName{Namespace: hc.Namespace, Name: hc.Spec.PullSecret.Name}, &pullSecret) - if err != nil { - return nil, fmt.Errorf("failed to get pull secret: %w", err) - } - pullSecretBytes, ok := pullSecret.Data[corev1.DockerConfigJsonKey] - if !ok { - return nil, fmt.Errorf("expected %s key in pull secret", corev1.DockerConfigJsonKey) - } - - metadata, err := v.imageMetaDataProvider.ImageMetadata(ctx, releaseImage, pullSecretBytes) - if err != nil { - return nil, fmt.Errorf("failed to retrive release image metadata: %w", err) - } - - ver, ok := metadata.Config.Labels[versionLabel] - if !ok { // no version. 
Can't validate - return nil, nil - } - - version, err := semver.Parse(ver) - if err != nil { - return nil, fmt.Errorf("wrong version structure %q: %w", ver, err) - } - - return &version, nil -} - func validateJsonAnnotation(annotations map[string]string) error { if ann, exists := annotations[hyperv1.JSONPatchAnnotation]; exists { patch, err := jsonpatch.DecodePatch([]byte(ann)) diff --git a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go index 5b61cf76f9..e31df5f890 100644 --- a/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go +++ b/hypershift-operator/controllers/hostedcluster/hostedcluster_webhook_test.go @@ -2,36 +2,17 @@ package hostedcluster import ( "context" - "fmt" "reflect" "testing" - "github.com/openshift/api/image/docker10" - "github.com/openshift/hypershift/support/thirdparty/library-go/pkg/image/dockerv1client" - hyperutil "github.com/openshift/hypershift/support/util" - "github.com/openshift/hypershift/support/util/fakeimagemetadataprovider" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client/fake" apiexample "github.com/openshift/hypershift/api/fixtures" "github.com/openshift/hypershift/api/hypershift/v1beta1" - "github.com/openshift/hypershift/kubevirtexternalinfra" - "github.com/openshift/hypershift/support/api" ) func TestValidateKVHostedClusterCreate(t *testing.T) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pull-secret", - Namespace: "myns", - }, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte("test secret"), - }, - } - for _, testCase := range []struct { name string hc *v1beta1.HostedCluster @@ -94,9 +75,44 @@ func TestValidateKVHostedClusterCreate(t *testing.T) { expectError: true, imageVersion: "4.15.0", }, + } { + t.Run(testCase.name, func(tt *testing.T) { + hcVal := &hostedClusterValidator{} + 
warnings, err := hcVal.ValidateCreate(context.Background(), testCase.hc) + + if testCase.expectError && err == nil { + t.Error("should return error but didn't") + } else if !testCase.expectError && err != nil { + t.Errorf("should not return error but returned %q", err.Error()) + } + if testCase.expectWarnings && warnings == nil { + t.Error("should return warnings but didn't") + } else if !testCase.expectWarnings && warnings != nil { + t.Errorf("should not return warnings but returned %q", warnings) + } + }) + } +} + +func TestValidateKVHostedClusterUpdate(t *testing.T) { + for _, testCase := range []struct { + name string + oldHC *v1beta1.HostedCluster + newHC *v1beta1.HostedCluster + expectError bool + expectWarnings bool + imageVersion string + }{ { - name: "cnv version not supported", - hc: &v1beta1.HostedCluster{ + name: "happy case - versions are valid", + oldHC: &v1beta1.HostedCluster{ + Spec: v1beta1.HostedClusterSpec{ + Release: v1beta1.Release{ + Image: "image-4.13.0", + }, + }, + }, + newHC: &v1beta1.HostedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-under-test", Namespace: "myns", @@ -114,17 +130,25 @@ func TestValidateKVHostedClusterCreate(t *testing.T) { }, }, }, - cnvVersion: "0.111.0", - k8sVersion: "1.27.0", - expectError: true, + expectError: false, imageVersion: "4.15.0", }, { - name: "k8s version not supported", - hc: &v1beta1.HostedCluster{ + name: "wrong json", + oldHC: &v1beta1.HostedCluster{ + Spec: v1beta1.HostedClusterSpec{ + Release: v1beta1.Release{ + Image: "image-4.13.0", + }, + }, + }, + newHC: &v1beta1.HostedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-under-test", Namespace: "myns", + Annotations: map[string]string{ + v1beta1.JSONPatchAnnotation: `[{`, + }, }, Spec: v1beta1.HostedClusterSpec{ Platform: v1beta1.PlatformSpec{ @@ -139,13 +163,108 @@ func TestValidateKVHostedClusterCreate(t *testing.T) { }, }, }, - cnvVersion: "1.0.0", - k8sVersion: "1.26.99", expectError: true, imageVersion: "4.15.0", }, + } { + 
t.Run(testCase.name, func(tt *testing.T) { + hcVal := &hostedClusterValidator{} + warnings, err := hcVal.ValidateUpdate(context.Background(), testCase.oldHC, testCase.newHC) + + if testCase.expectError && err == nil { + t.Error("should return error but didn't") + } else if !testCase.expectError && err != nil { + t.Errorf("should not return error but returned %q", err.Error()) + } + if testCase.expectWarnings && warnings == nil { + t.Error("should return warnings but didn't") + } else if !testCase.expectWarnings && warnings != nil { + t.Errorf("should not return warnings but returned %q", warnings) + } + }) + } +} + +func TestValidateJsonAnnotation(t *testing.T) { + for _, tc := range []struct { + name string + annotations map[string]string + expectError bool + }{ + { + name: "no annotation", + annotations: nil, + expectError: false, + }, + { + name: "valid annotation", + annotations: map[string]string{ + v1beta1.JSONPatchAnnotation: `[{"op": "replace","path": "/spec/domain/cpu/cores","value": 3}]`, + }, + expectError: false, + }, + { + name: "not an array", + annotations: map[string]string{ + v1beta1.JSONPatchAnnotation: `{"op": "replace","path": "/spec/domain/cpu/cores","value": 3}`, + }, + expectError: true, + }, + { + name: "corrupted json", + annotations: map[string]string{ + v1beta1.JSONPatchAnnotation: `[{"op": "replace","path": "/spec/domain/cpu/cores","value": 3}`, + }, + expectError: true, + }, + { + name: "missing op", + annotations: map[string]string{ + v1beta1.JSONPatchAnnotation: `[{"path": "/spec/domain/cpu/cores","value": 3}]`, + }, + expectError: true, + }, + { + name: "missing path", + annotations: map[string]string{ + v1beta1.JSONPatchAnnotation: `[{"op": "replace","value": 3}]`, + }, + expectError: true, + }, + { + name: "missing value", + annotations: map[string]string{ + v1beta1.JSONPatchAnnotation: `[{"op": "replace","path": "/spec/domain/cpu/cores"}]`, + }, + expectError: true, + }, + } { + t.Run(tc.name, func(tt *testing.T) { + err := 
validateJsonAnnotation(tc.annotations) + if (err != nil) != tc.expectError { + errMsgBool := []string{" ", "did"} + if !tc.expectError { + errMsgBool = []string{" not ", "didn't"} + } + tt.Errorf("should%sreturn error, but it %s. error: %v", errMsgBool[0], errMsgBool[1], err) + } + }) + } +} + +func TestValidateKVNodePoolCreate(t *testing.T) { + for _, testCase := range []struct { + name string + hc *v1beta1.HostedCluster + np *v1beta1.NodePool + cnvVersion string + k8sVersion string + expectError bool + expectWarnings bool + imageVersion string + }{ { - name: "no kubevirt field", + name: "happy case - versions are valid", hc: &v1beta1.HostedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-under-test", @@ -153,55 +272,41 @@ func TestValidateKVHostedClusterCreate(t *testing.T) { }, Spec: v1beta1.HostedClusterSpec{ Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, + Type: v1beta1.KubevirtPlatform, + Kubevirt: &v1beta1.KubevirtPlatformSpec{}, }, PullSecret: corev1.LocalObjectReference{ Name: "pull-secret", }, - Release: v1beta1.Release{ - Image: "image-4.15.0", - }, }, }, - cnvVersion: "1.0.0", - k8sVersion: "1.27.0", - expectError: true, - imageVersion: "4.15.0", - }, - { - name: "image version too old", - hc: &v1beta1.HostedCluster{ + np: &v1beta1.NodePool{ ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", + Name: "np-under-test", Namespace: "myns", }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ + Spec: v1beta1.NodePoolSpec{ + ClusterName: "cluster-under-test", + Platform: v1beta1.NodePoolPlatform{ Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", + Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, }, Release: v1beta1.Release{ - Image: "image-4.13.0", + Image: "image-4.15.0", }, }, }, cnvVersion: "1.0.0", k8sVersion: "1.27.0", - expectError: true, - imageVersion: "4.13.0", + expectError: false, + imageVersion: 
"4.15.0", }, { - name: fmt.Sprintf("skip image version validation if the %q annotation is set", v1beta1.SkipReleaseImageValidation), + name: "wrong json", hc: &v1beta1.HostedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-under-test", Namespace: "myns", - Annotations: map[string]string{ - v1beta1.SkipReleaseImageValidation: "true", - }, }, Spec: v1beta1.HostedClusterSpec{ Platform: v1beta1.PlatformSpec{ @@ -211,61 +316,36 @@ func TestValidateKVHostedClusterCreate(t *testing.T) { PullSecret: corev1.LocalObjectReference{ Name: "pull-secret", }, - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, }, }, - cnvVersion: "1.0.0", - k8sVersion: "1.27.0", - expectError: false, - imageVersion: "4.13.0", - }, - { - name: "unknown image", - hc: &v1beta1.HostedCluster{ + np: &v1beta1.NodePool{ ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", + Name: "np-under-test", Namespace: "myns", + Annotations: map[string]string{ + v1beta1.JSONPatchAnnotation: `[{`, + }, }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ + Spec: v1beta1.NodePoolSpec{ + ClusterName: "cluster-under-test", + Platform: v1beta1.NodePoolPlatform{ Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", + Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, }, Release: v1beta1.Release{ - Image: "unknown", + Image: "image-4.15.0", }, }, }, cnvVersion: "1.0.0", k8sVersion: "1.27.0", expectError: true, - imageVersion: "", + imageVersion: "4.15.0", }, } { t.Run(testCase.name, func(tt *testing.T) { - origValidator := kvValidator - defer func() { - kvValidator = origValidator - }() - - cl := fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(secret).Build() - clientMap := kubevirtexternalinfra.NewMockKubevirtInfraClientMap(cl, testCase.cnvVersion, testCase.k8sVersion) - - kvValidator = &kubevirtClusterValidator{ - client: cl, - clientMap: clientMap, - imageMetaDataProvider: 
&fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{Config: &docker10.DockerConfig{ - Labels: map[string]string{versionLabel: testCase.imageVersion}}}, - }, - } - - hcVal := &hostedClusterValidator{} - warnings, err := hcVal.ValidateCreate(context.Background(), testCase.hc) + npVal := &nodePoolValidator{} + warnings, err := npVal.ValidateCreate(context.Background(), testCase.np) if testCase.expectError && err == nil { t.Error("should return error but didn't") @@ -281,35 +361,19 @@ func TestValidateKVHostedClusterCreate(t *testing.T) { } } -func TestValidateKVHostedClusterUpdate(t *testing.T) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pull-secret", - Namespace: "myns", - }, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte("test secret"), - }, - } - +func TestValidateKVNodePoolUpdate(t *testing.T) { for _, testCase := range []struct { name string - oldHC *v1beta1.HostedCluster - newHC *v1beta1.HostedCluster + hc *v1beta1.HostedCluster + oldNP *v1beta1.NodePool + newNP *v1beta1.NodePool expectError bool expectWarnings bool imageVersion string }{ { name: "happy case - versions are valid", - oldHC: &v1beta1.HostedCluster{ - Spec: v1beta1.HostedClusterSpec{ - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - newHC: &v1beta1.HostedCluster{ + hc: &v1beta1.HostedCluster{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-under-test", Namespace: "myns", @@ -322,734 +386,12 @@ func TestValidateKVHostedClusterUpdate(t *testing.T) { PullSecret: corev1.LocalObjectReference{ Name: "pull-secret", }, + }, + }, + oldNP: &v1beta1.NodePool{ + Spec: v1beta1.NodePoolSpec{ Release: v1beta1.Release{ - Image: "image-4.15.0", - }, - }, - }, - expectError: false, - imageVersion: "4.15.0", - }, - { - name: "wrong json", - oldHC: &v1beta1.HostedCluster{ - Spec: v1beta1.HostedClusterSpec{ - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - newHC: &v1beta1.HostedCluster{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - Annotations: map[string]string{ - v1beta1.JSONPatchAnnotation: `[{`, - }, - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - Release: v1beta1.Release{ - Image: "image-4.15.0", - }, - }, - }, - expectError: true, - imageVersion: "4.15.0", - }, - { - name: "image version too old", - oldHC: &v1beta1.HostedCluster{ - Spec: v1beta1.HostedClusterSpec{ - Release: v1beta1.Release{ - Image: "image-4.12.0", - }, - }, - }, - newHC: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - expectError: true, - imageVersion: "4.13.0", - }, - { - name: fmt.Sprintf("skip image version validation if the %q annotation is set", v1beta1.SkipReleaseImageValidation), - oldHC: &v1beta1.HostedCluster{ - Spec: v1beta1.HostedClusterSpec{ - Release: v1beta1.Release{ - Image: "image-4.12.0", - }, - }, - }, - newHC: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - Annotations: map[string]string{ - v1beta1.SkipReleaseImageValidation: "true", - }, - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - expectError: false, - imageVersion: "4.13.0", - }, - { - name: "unknown image", - oldHC: 
&v1beta1.HostedCluster{ - Spec: v1beta1.HostedClusterSpec{ - Release: v1beta1.Release{ - Image: "image-4.12.0", - }, - }, - }, - newHC: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - Release: v1beta1.Release{ - Image: "unknown", - }, - }, - }, - expectError: true, - imageVersion: "", - }, - { - name: "release image wasn't changed", - oldHC: &v1beta1.HostedCluster{ - Spec: v1beta1.HostedClusterSpec{ - Release: v1beta1.Release{ - Image: "unknown", - }, - }, - }, - newHC: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - Release: v1beta1.Release{ - Image: "unknown", // wrong image, but the same as old HC - }, - }, - }, - expectError: false, - imageVersion: "", - }, - } { - t.Run(testCase.name, func(tt *testing.T) { - origValidator := kvValidator - defer func() { - kvValidator = origValidator - }() - - cl := fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(secret).Build() - - kvValidator = &kubevirtClusterValidator{ - client: cl, - //clientMap: nil, - imageMetaDataProvider: &fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{Config: &docker10.DockerConfig{ - Labels: map[string]string{versionLabel: testCase.imageVersion}}}, - }, - } - - hcVal := &hostedClusterValidator{} - warnings, err := hcVal.ValidateUpdate(context.Background(), testCase.oldHC, testCase.newHC) - - if testCase.expectError && err == nil { - t.Error("should return error but 
didn't") - } else if !testCase.expectError && err != nil { - t.Errorf("should not return error but returned %q", err.Error()) - } - if testCase.expectWarnings && warnings == nil { - t.Error("should return warnings but didn't") - } else if !testCase.expectWarnings && warnings != nil { - t.Errorf("should not return warnings but returned %q", warnings) - } - }) - } -} - -func TestKVClusterValidator_getImageVersion(t *testing.T) { - hc := &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - Release: v1beta1.Release{ - Image: "image-4.15.0", - }, - }, - } - - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: hc.Spec.PullSecret.Name, - Namespace: hc.Namespace, - }, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte("test secret"), - }, - } - cl := fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(secret).Build() - - v := kubevirtClusterValidator{ - client: cl, - clientMap: nil, - imageMetaDataProvider: &fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{Config: &docker10.DockerConfig{ - Labels: map[string]string{versionLabel: "4.15.0"}}}, - }, - } - - ctx := context.Background() - ver, err := v.getImageVersion(ctx, hc, hyperutil.HCControlPlaneReleaseImage(hc)) - if err != nil { - t.Fatalf("should not return error but it did: %v", err) - } - - if ver == nil { - t.Fatalf("should return version but it didn't") - } - - if ver.Major != 4 || ver.Minor != 15 || ver.Patch != 0 { - t.Errorf("version should be 4.15.0, but it's %s", ver.String()) - } -} - -func TestValidateJsonAnnotation(t *testing.T) { - for _, tc := range []struct { - name string - annotations map[string]string - expectError bool - }{ - { - name: "no 
annotation", - annotations: nil, - expectError: false, - }, - { - name: "valid annotation", - annotations: map[string]string{ - v1beta1.JSONPatchAnnotation: `[{"op": "replace","path": "/spec/domain/cpu/cores","value": 3}]`, - }, - expectError: false, - }, - { - name: "not an array", - annotations: map[string]string{ - v1beta1.JSONPatchAnnotation: `{"op": "replace","path": "/spec/domain/cpu/cores","value": 3}`, - }, - expectError: true, - }, - { - name: "corrupted json", - annotations: map[string]string{ - v1beta1.JSONPatchAnnotation: `[{"op": "replace","path": "/spec/domain/cpu/cores","value": 3}`, - }, - expectError: true, - }, - { - name: "missing op", - annotations: map[string]string{ - v1beta1.JSONPatchAnnotation: `[{"path": "/spec/domain/cpu/cores","value": 3}]`, - }, - expectError: true, - }, - { - name: "missing path", - annotations: map[string]string{ - v1beta1.JSONPatchAnnotation: `[{"op": "replace","value": 3}]`, - }, - expectError: true, - }, - { - name: "missing value", - annotations: map[string]string{ - v1beta1.JSONPatchAnnotation: `[{"op": "replace","path": "/spec/domain/cpu/cores"}]`, - }, - expectError: true, - }, - } { - t.Run(tc.name, func(tt *testing.T) { - err := validateJsonAnnotation(tc.annotations) - if (err != nil) != tc.expectError { - errMsgBool := []string{" ", "did"} - if !tc.expectError { - errMsgBool = []string{" not ", "didn't"} - } - tt.Errorf("should%sreturn error, but it %s. 
error: %v", errMsgBool[0], errMsgBool[1], err) - } - }) - } -} - -func TestValidateKVNodePoolCreate(t *testing.T) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pull-secret", - Namespace: "myns", - }, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte("test secret"), - }, - } - - for _, testCase := range []struct { - name string - hc *v1beta1.HostedCluster - np *v1beta1.NodePool - cnvVersion string - k8sVersion string - expectError bool - expectWarnings bool - imageVersion string - }{ - { - name: "happy case - versions are valid", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - np: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "image-4.15.0", - }, - }, - }, - cnvVersion: "1.0.0", - k8sVersion: "1.27.0", - expectError: false, - imageVersion: "4.15.0", - }, - { - name: "wrong json", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - np: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - Annotations: map[string]string{ - v1beta1.JSONPatchAnnotation: `[{`, - }, - }, - Spec: v1beta1.NodePoolSpec{ - 
ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "image-4.15.0", - }, - }, - }, - cnvVersion: "1.0.0", - k8sVersion: "1.27.0", - expectError: true, - imageVersion: "4.15.0", - }, - { - name: "cnv version not supported", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - np: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "image-4.15.0", - }, - }, - }, - cnvVersion: "0.111.0", - k8sVersion: "1.27.0", - expectError: true, - imageVersion: "4.15.0", - }, - { - name: "k8s version not supported", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - np: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "image-4.15.0", - }, - }, - }, - cnvVersion: "1.0.0", - k8sVersion: 
"1.26.99", - expectError: true, - imageVersion: "4.15.0", - }, - { - name: "no kubevirt field", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - np: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "image-4.15.0", - }, - }, - }, - cnvVersion: "1.0.0", - k8sVersion: "1.27.0", - expectError: true, - imageVersion: "4.15.0", - }, - { - name: "image version too old", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - np: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - cnvVersion: "1.0.0", - k8sVersion: "1.27.0", - expectError: true, - imageVersion: "4.13.0", - }, - { - name: `skip image version validation if the "hypershift.openshift.io/skip-release-image-validation" annotation is set`, - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - Annotations: 
map[string]string{ - v1beta1.SkipReleaseImageValidation: "true", - }, - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - np: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - cnvVersion: "1.0.0", - k8sVersion: "1.27.0", - expectError: false, - imageVersion: "4.13.0", - }, - { - name: "unknown image", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - np: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "unknown", - }, - }, - }, - cnvVersion: "1.0.0", - k8sVersion: "1.27.0", - expectError: true, - imageVersion: "", - }, - } { - t.Run(testCase.name, func(tt *testing.T) { - origValidator := kvValidator - defer func() { - kvValidator = origValidator - }() - - cl := fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(secret, testCase.hc).Build() - clientMap := kubevirtexternalinfra.NewMockKubevirtInfraClientMap(cl, 
testCase.cnvVersion, testCase.k8sVersion) - - kvValidator = &kubevirtClusterValidator{ - client: cl, - clientMap: clientMap, - imageMetaDataProvider: &fakeimagemetadataprovider.FakeImageMetadataProvider{Result: &dockerv1client.DockerImageConfig{Config: &docker10.DockerConfig{ - Labels: map[string]string{versionLabel: testCase.imageVersion}}}, - }, - } - - npVal := &nodePoolValidator{client: cl} - warnings, err := npVal.ValidateCreate(context.Background(), testCase.np) - - if testCase.expectError && err == nil { - t.Error("should return error but didn't") - } else if !testCase.expectError && err != nil { - t.Errorf("should not return error but returned %q", err.Error()) - } - if testCase.expectWarnings && warnings == nil { - t.Error("should return warnings but didn't") - } else if !testCase.expectWarnings && warnings != nil { - t.Errorf("should not return warnings but returned %q", warnings) - } - }) - } -} - -func TestValidateKVNodePoolUpdate(t *testing.T) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pull-secret", - Namespace: "myns", - }, - Data: map[string][]byte{ - corev1.DockerConfigJsonKey: []byte("test secret"), - }, - } - - for _, testCase := range []struct { - name string - hc *v1beta1.HostedCluster - oldNP *v1beta1.NodePool - newNP *v1beta1.NodePool - expectError bool - expectWarnings bool - imageVersion string - }{ - { - name: "happy case - versions are valid", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - oldNP: &v1beta1.NodePool{ - Spec: v1beta1.NodePoolSpec{ - Release: v1beta1.Release{ - Image: "image-4.14.0", + Image: "image-4.14.0", }, }, }, @@ -1118,202 +460,9 @@ func TestValidateKVNodePoolUpdate(t *testing.T) { 
expectError: true, imageVersion: "4.15.0", }, - { - name: "image version too old", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - oldNP: &v1beta1.NodePool{ - Spec: v1beta1.NodePoolSpec{ - Release: v1beta1.Release{ - Image: "image-4.12.0", - }, - }, - }, - newNP: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - expectError: true, - imageVersion: "4.13.0", - }, - { - name: `skip image version validation if the "hypershift.openshift.io/skip-release-image-validation" annotation is set`, - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - Annotations: map[string]string{ - v1beta1.SkipReleaseImageValidation: "true", - }, - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - oldNP: &v1beta1.NodePool{ - Spec: v1beta1.NodePoolSpec{ - Release: v1beta1.Release{ - Image: "image-4.12.0", - }, - }, - }, - newNP: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: 
v1beta1.Release{ - Image: "image-4.13.0", - }, - }, - }, - expectError: false, - imageVersion: "4.13.0", - }, - { - name: "unknown image", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - oldNP: &v1beta1.NodePool{ - Spec: v1beta1.NodePoolSpec{ - Release: v1beta1.Release{ - Image: "image-4.12.0", - }, - }, - }, - newNP: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "unknown", - }, - }, - }, - expectError: true, - imageVersion: "", - }, - { - name: "release image wasn't changed", - hc: &v1beta1.HostedCluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster-under-test", - Namespace: "myns", - }, - Spec: v1beta1.HostedClusterSpec{ - Platform: v1beta1.PlatformSpec{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtPlatformSpec{}, - }, - PullSecret: corev1.LocalObjectReference{ - Name: "pull-secret", - }, - }, - }, - oldNP: &v1beta1.NodePool{ - Spec: v1beta1.NodePoolSpec{ - Release: v1beta1.Release{ - Image: "unknown", - }, - }, - }, - newNP: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "np-under-test", - Namespace: "myns", - }, - Spec: v1beta1.NodePoolSpec{ - ClusterName: "cluster-under-test", - Platform: v1beta1.NodePoolPlatform{ - Type: v1beta1.KubevirtPlatform, - Kubevirt: &v1beta1.KubevirtNodePoolPlatform{}, - }, - Release: v1beta1.Release{ - Image: "unknown", - }, - }, - }, - expectError: false, - imageVersion: "", - }, } { t.Run(testCase.name, func(tt 
*testing.T) { - origValidator := kvValidator - defer func() { - kvValidator = origValidator - }() - - cl := fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(secret, testCase.hc).Build() - - kvValidator = &kubevirtClusterValidator{ - client: cl, - imageMetaDataProvider: &fakeimagemetadataprovider.FakeImageMetadataProvider{ - Result: &dockerv1client.DockerImageConfig{ - Config: &docker10.DockerConfig{ - Labels: map[string]string{versionLabel: testCase.imageVersion}, - }, - }, - }, - } - - npVal := &nodePoolValidator{client: cl} + npVal := &nodePoolValidator{} warnings, err := npVal.ValidateUpdate(context.Background(), testCase.oldNP, testCase.newNP) if testCase.expectError && err == nil { diff --git a/hypershift-operator/controllers/hostedcluster/ignitionserver/ignitionserver.go b/hypershift-operator/controllers/hostedcluster/ignitionserver/ignitionserver.go index 7cbc1e26aa..5c24da9084 100644 --- a/hypershift-operator/controllers/hostedcluster/ignitionserver/ignitionserver.go +++ b/hypershift-operator/controllers/hostedcluster/ignitionserver/ignitionserver.go @@ -97,7 +97,7 @@ func ReconcileIgnitionServer(ctx context.Context, if serviceStrategy.Route != nil { hostname = serviceStrategy.Route.Hostname } - err := reconcileExternalRoute(ignitionServerRoute, ownerRef, routeServiceName, hostname, defaultIngressDomain) + err := reconcileExternalRoute(ignitionServerRoute, ownerRef, routeServiceName, hostname, defaultIngressDomain, hostname != "") if err != nil { return fmt.Errorf("failed to reconcile external route in ignition server: %w", err) } @@ -325,9 +325,9 @@ func reconcileIgnitionServerServiceWithProxy(svc *corev1.Service, strategy *hype return nil } -func reconcileExternalRoute(route *routev1.Route, ownerRef config.OwnerRef, svcName string, hostname string, defaultIngressDomain string) error { +func reconcileExternalRoute(route *routev1.Route, ownerRef config.OwnerRef, svcName string, hostname string, defaultIngressDomain string, labelHCPRoutes bool) 
error { ownerRef.ApplyTo(route) - return util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, svcName) + return util.ReconcileExternalRoute(route, hostname, defaultIngressDomain, svcName, labelHCPRoutes) } func reconcileInternalRoute(route *routev1.Route, ownerRef config.OwnerRef, svcName string) error { diff --git a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go index 822d18062d..811bd514c7 100644 --- a/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go +++ b/hypershift-operator/controllers/hostedcluster/internal/platform/aws/aws.go @@ -230,7 +230,7 @@ func (p AWS) CAPIProviderDeploymentSpec(hcluster *hyperv1.HostedCluster, hcp *hy }, }, } - util.AvailabilityProber(kas.InClusterKASReadyURL(), p.utilitiesImage, &deploymentSpec.Template.Spec) + util.AvailabilityProber(kas.InClusterKASReadyURL(hcp.Spec.Platform.Type), p.utilitiesImage, &deploymentSpec.Template.Spec) return deploymentSpec, nil } @@ -241,6 +241,7 @@ func (p AWS) ReconcileCredentials(ctx context.Context, c client.Client, createOr awsCredentialsTemplate := `[default] role_arn = %s web_identity_token_file = /var/run/secrets/openshift/serviceaccount/token +sts_regional_endpoints = regional ` // TODO (alberto): consider moving this reconciliation logic down to the CPO. 
// this is not trivial as the CPO deployment itself needs the secret with the ControlPlaneOperatorARN diff --git a/hypershift-operator/controllers/hostedcluster/network_policies.go b/hypershift-operator/controllers/hostedcluster/network_policies.go index 71e97c3435..88b10adc31 100644 --- a/hypershift-operator/controllers/hostedcluster/network_policies.go +++ b/hypershift-operator/controllers/hostedcluster/network_policies.go @@ -139,6 +139,13 @@ func (r *HostedClusterReconciler) reconcileNetworkPolicies(ctx context.Context, }); err != nil { return fmt.Errorf("failed to reconcile ignition nodeport network policy: %w", err) } + // Reconcile nodeport-ignition-proxy Network Policy + policy = networkpolicy.NodePortIgnitionProxyNetworkPolicy(controlPlaneNamespaceName) + if _, err := createOrUpdate(ctx, r.Client, policy, func() error { + return reconcileNodePortIgnitionProxyNetworkPolicy(policy, hcluster) + }); err != nil { + return fmt.Errorf("failed to reconcile ignition proxy nodeport network policy: %w", err) + } } case hyperv1.Konnectivity: if svc.ServicePublishingStrategy.Type == hyperv1.NodePort { @@ -149,6 +156,15 @@ func (r *HostedClusterReconciler) reconcileNetworkPolicies(ctx context.Context, }); err != nil { return fmt.Errorf("failed to reconcile konnectivity nodeport network policy: %w", err) } + + // Reconcile nodeport-konnectivity Network Policy when konnectivity is hosted in the kas pod + policy = networkpolicy.NodePortKonnectivityKASNetworkPolicy(controlPlaneNamespaceName) + if _, err := createOrUpdate(ctx, r.Client, policy, func() error { + return reconcileNodePortKonnectivityKASNetworkPolicy(policy, hcluster) + }); err != nil { + return fmt.Errorf("failed to reconcile konnectivity nodeport network policy: %w", err) + } + } } } @@ -158,6 +174,9 @@ func (r *HostedClusterReconciler) reconcileNetworkPolicies(ctx context.Context, func reconcileKASNetworkPolicy(policy *networkingv1.NetworkPolicy, hcluster *hyperv1.HostedCluster, isOpenShiftDNS bool, 
managementClusterNetwork *configv1.Network) error { port := intstr.FromInt32(config.KASSVCPort) + if hcluster.Spec.Platform.Type == hyperv1.IBMCloudPlatform { + port = intstr.FromInt32(config.KASSVCIBMCloudPort) + } protocol := corev1.ProtocolTCP policy.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeIngress} policy.Spec.Ingress = []networkingv1.NetworkPolicyIngressRule{ @@ -342,6 +361,29 @@ func reconcileNodePortOauthNetworkPolicy(policy *networkingv1.NetworkPolicy, hcl return nil } +func reconcileNodePortIgnitionProxyNetworkPolicy(policy *networkingv1.NetworkPolicy, hcluster *hyperv1.HostedCluster) error { + port := intstr.FromInt(8443) + protocol := corev1.ProtocolTCP + policy.Spec.Ingress = []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{}, + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: &port, + Protocol: &protocol, + }, + }, + }, + } + policy.Spec.PodSelector = metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "ignition-server-proxy", + }, + } + policy.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeIngress} + return nil +} + func reconcileNodePortIgnitionNetworkPolicy(policy *networkingv1.NetworkPolicy, hcluster *hyperv1.HostedCluster) error { port := intstr.FromInt(9090) protocol := corev1.ProtocolTCP @@ -365,6 +407,29 @@ func reconcileNodePortIgnitionNetworkPolicy(policy *networkingv1.NetworkPolicy, return nil } +func reconcileNodePortKonnectivityKASNetworkPolicy(policy *networkingv1.NetworkPolicy, hcluster *hyperv1.HostedCluster) error { + port := intstr.FromInt(8091) + protocol := corev1.ProtocolTCP + policy.Spec.Ingress = []networkingv1.NetworkPolicyIngressRule{ + { + From: []networkingv1.NetworkPolicyPeer{}, + Ports: []networkingv1.NetworkPolicyPort{ + { + Port: &port, + Protocol: &protocol, + }, + }, + }, + } + policy.Spec.PodSelector = metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "kube-apiserver", + }, + } + policy.Spec.PolicyTypes 
= []networkingv1.PolicyType{networkingv1.PolicyTypeIngress} + return nil +} + func reconcileNodePortKonnectivityNetworkPolicy(policy *networkingv1.NetworkPolicy, hcluster *hyperv1.HostedCluster) error { port := intstr.FromInt(8091) protocol := corev1.ProtocolTCP diff --git a/hypershift-operator/controllers/manifests/controlplanepkioperator/manifests.go b/hypershift-operator/controllers/manifests/controlplanepkioperator/manifests.go new file mode 100644 index 0000000000..73ca08ad48 --- /dev/null +++ b/hypershift-operator/controllers/manifests/controlplanepkioperator/manifests.go @@ -0,0 +1,54 @@ +package controlplanepkioperator + +import ( + "fmt" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/openshift/hypershift/hypershift-operator/controllers/manifests" +) + +// we require labelling these cluster-scoped resources so that the controller cleaning them up +// can find them efficiently, as we can't use namespace-based scoping for these objects +const ( + OwningHostedClusterNamespaceLabel = "hypershift.openshift.io/owner.namespace" + OwningHostedClusterNameLabel = "hypershift.openshift.io/owner.name" +) + +func CSRApproverClusterRole(hc *hypershiftv1beta1.HostedCluster) *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-csr-approver", manifests.HostedControlPlaneNamespace(hc.Namespace, hc.Name)), + Labels: map[string]string{ + OwningHostedClusterNamespaceLabel: hc.Namespace, + OwningHostedClusterNameLabel: hc.Name, + }, + }, + } +} + +func CSRSignerClusterRole(hc *hypershiftv1beta1.HostedCluster) *rbacv1.ClusterRole { + return &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-csr-signer", manifests.HostedControlPlaneNamespace(hc.Namespace, hc.Name)), + Labels: map[string]string{ + OwningHostedClusterNamespaceLabel: hc.Namespace, + OwningHostedClusterNameLabel: 
hc.Name, + }, + }, + } +} + +func ClusterRoleBinding(hc *hypershiftv1beta1.HostedCluster, clusterRole *rbacv1.ClusterRole) *rbacv1.ClusterRoleBinding { + return &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRole.Name, + Labels: map[string]string{ + OwningHostedClusterNamespaceLabel: hc.Namespace, + OwningHostedClusterNameLabel: hc.Name, + }, + }, + } +} diff --git a/hypershift-operator/controllers/manifests/controlplanepkioperator/reconcile.go b/hypershift-operator/controllers/manifests/controlplanepkioperator/reconcile.go new file mode 100644 index 0000000000..9bc65262ca --- /dev/null +++ b/hypershift-operator/controllers/manifests/controlplanepkioperator/reconcile.go @@ -0,0 +1,79 @@ +package controlplanepkioperator + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" +) + +func ReconcileCSRApproverClusterRole(clusterRole *rbacv1.ClusterRole, hc *hypershiftv1beta1.HostedCluster, signers ...certificates.SignerClass) error { + var signerNames []string + for _, signer := range signers { + signerNames = append(signerNames, certificates.SignerNameForHC(hc, signer)) + } + clusterRole.Rules = []rbacv1.PolicyRule{ + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"certificatesigningrequests"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"certificatesigningrequests/approval"}, + Verbs: []string{"update"}, + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"signers"}, + Verbs: []string{"approve"}, + ResourceNames: signerNames, + }, + } + + return nil +} + +func ReconcileCSRSignerClusterRole(clusterRole *rbacv1.ClusterRole, hc *hypershiftv1beta1.HostedCluster, signers ...certificates.SignerClass) error { + var signerNames []string + for _, signer := 
range signers { + signerNames = append(signerNames, certificates.SignerNameForHC(hc, signer)) + } + clusterRole.Rules = []rbacv1.PolicyRule{ + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"certificatesigningrequests"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"certificatesigningrequests/status"}, + Verbs: []string{"patch"}, + }, + { + APIGroups: []string{"certificates.k8s.io"}, + Resources: []string{"signers"}, + Verbs: []string{"sign"}, + ResourceNames: signerNames, + }, + } + + return nil +} + +func ReconcileClusterRoleBinding(clusterRoleBinding *rbacv1.ClusterRoleBinding, clusterRole *rbacv1.ClusterRole, serviceAccount *corev1.ServiceAccount) error { + clusterRoleBinding.RoleRef = rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: clusterRole.Name, + } + clusterRoleBinding.Subjects = []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: serviceAccount.Name, + Namespace: serviceAccount.Namespace, + }, + } + return nil +} diff --git a/hypershift-operator/controllers/manifests/controlplanepkioperator/reconcile_test.go b/hypershift-operator/controllers/manifests/controlplanepkioperator/reconcile_test.go new file mode 100644 index 0000000000..0825efeb7e --- /dev/null +++ b/hypershift-operator/controllers/manifests/controlplanepkioperator/reconcile_test.go @@ -0,0 +1,82 @@ +package controlplanepkioperator_test + +import ( + "testing" + + "github.com/openshift/hypershift/api" + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" + "github.com/openshift/hypershift/hypershift-operator/controllers/manifests/controlplanepkioperator" + "github.com/openshift/hypershift/support/testutil" + "github.com/openshift/hypershift/support/util" + 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestReconcileCSRApproverClusterRole(t *testing.T) { + hostedCluster := &hypershiftv1beta1.HostedCluster{ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-hc", + }} + clusterRole := controlplanepkioperator.CSRApproverClusterRole(hostedCluster) + if err := controlplanepkioperator.ReconcileCSRApproverClusterRole(clusterRole, hostedCluster, certificates.CustomerBreakGlassSigner, certificates.SREBreakGlassSigner); err != nil { + t.Fatalf("unexpected error: %v", err) + } + clusterRoleYaml, err := util.SerializeResource(clusterRole, api.Scheme) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + testutil.CompareWithFixture(t, clusterRoleYaml) +} + +func TestReconcileCSRSignerClusterRole(t *testing.T) { + hostedCluster := &hypershiftv1beta1.HostedCluster{ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-hc", + }} + clusterRole := controlplanepkioperator.CSRSignerClusterRole(hostedCluster) + if err := controlplanepkioperator.ReconcileCSRSignerClusterRole(clusterRole, hostedCluster, certificates.CustomerBreakGlassSigner, certificates.SREBreakGlassSigner); err != nil { + t.Fatalf("unexpected error: %v", err) + } + clusterRoleYaml, err := util.SerializeResource(clusterRole, api.Scheme) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + testutil.CompareWithFixture(t, clusterRoleYaml) +} + +func TestReconcileCSRApproverClusterRoleBinding(t *testing.T) { + hostedCluster := &hypershiftv1beta1.HostedCluster{ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-hc", + }} + serviceAccount := manifests.PKIOperatorServiceAccount("test-namespace") + clusterRole := controlplanepkioperator.CSRApproverClusterRole(hostedCluster) + clusterRoleBinding := controlplanepkioperator.ClusterRoleBinding(hostedCluster, clusterRole) + if err := controlplanepkioperator.ReconcileClusterRoleBinding(clusterRoleBinding, clusterRole, serviceAccount); err 
!= nil { + t.Fatalf("unexpected error: %v", err) + } + clusterRoleBindingYaml, err := util.SerializeResource(clusterRoleBinding, api.Scheme) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + testutil.CompareWithFixture(t, clusterRoleBindingYaml) +} + +func TestReconcileCSRSignerClusterRoleBinding(t *testing.T) { + hostedCluster := &hypershiftv1beta1.HostedCluster{ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-hc", + }} + serviceAccount := manifests.PKIOperatorServiceAccount("test-namespace") + clusterRole := controlplanepkioperator.CSRSignerClusterRole(hostedCluster) + clusterRoleBinding := controlplanepkioperator.ClusterRoleBinding(hostedCluster, clusterRole) + if err := controlplanepkioperator.ReconcileClusterRoleBinding(clusterRoleBinding, clusterRole, serviceAccount); err != nil { + t.Fatalf("unexpected error: %v", err) + } + clusterRoleBindingYaml, err := util.SerializeResource(clusterRoleBinding, api.Scheme) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + testutil.CompareWithFixture(t, clusterRoleBindingYaml) +} diff --git a/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRApproverClusterRole.yaml b/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRApproverClusterRole.yaml new file mode 100644 index 0000000000..ab7ae7ea6a --- /dev/null +++ b/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRApproverClusterRole.yaml @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/owner.name: test-hc + hypershift.openshift.io/owner.namespace: test-namespace + name: test-namespace-test-hc-csr-approver +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + 
resources: + - certificatesigningrequests/approval + verbs: + - update +- apiGroups: + - certificates.k8s.io + resourceNames: + - hypershift.openshift.io/test-namespace-test-hc.customer-break-glass + - hypershift.openshift.io/test-namespace-test-hc.sre-break-glass + resources: + - signers + verbs: + - approve diff --git a/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRApproverClusterRoleBinding.yaml b/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRApproverClusterRoleBinding.yaml new file mode 100644 index 0000000000..2889ad4a62 --- /dev/null +++ b/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRApproverClusterRoleBinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/owner.name: test-hc + hypershift.openshift.io/owner.namespace: test-namespace + name: test-namespace-test-hc-csr-approver +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: test-namespace-test-hc-csr-approver +subjects: +- kind: ServiceAccount + name: control-plane-pki-operator + namespace: test-namespace diff --git a/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRSignerClusterRole.yaml b/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRSignerClusterRole.yaml new file mode 100644 index 0000000000..fb69238b32 --- /dev/null +++ b/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRSignerClusterRole.yaml @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/owner.name: test-hc + hypershift.openshift.io/owner.namespace: test-namespace + name: 
test-namespace-test-hc-csr-signer +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - list + - watch +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/status + verbs: + - patch +- apiGroups: + - certificates.k8s.io + resourceNames: + - hypershift.openshift.io/test-namespace-test-hc.customer-break-glass + - hypershift.openshift.io/test-namespace-test-hc.sre-break-glass + resources: + - signers + verbs: + - sign diff --git a/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRSignerClusterRoleBinding.yaml b/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRSignerClusterRoleBinding.yaml new file mode 100644 index 0000000000..8bd44f4aaf --- /dev/null +++ b/hypershift-operator/controllers/manifests/controlplanepkioperator/testdata/zz_fixture_TestReconcileCSRSignerClusterRoleBinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + creationTimestamp: null + labels: + hypershift.openshift.io/owner.name: test-hc + hypershift.openshift.io/owner.namespace: test-namespace + name: test-namespace-test-hc-csr-signer +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: test-namespace-test-hc-csr-signer +subjects: +- kind: ServiceAccount + name: control-plane-pki-operator + namespace: test-namespace diff --git a/hypershift-operator/controllers/manifests/networkpolicy/manifests.go b/hypershift-operator/controllers/manifests/networkpolicy/manifests.go index f770eabb5e..09ff3e93cb 100644 --- a/hypershift-operator/controllers/manifests/networkpolicy/manifests.go +++ b/hypershift-operator/controllers/manifests/networkpolicy/manifests.go @@ -86,6 +86,15 @@ func NodePortIgnitionNetworkPolicy(namespace string) *networkingv1.NetworkPolicy } } +func NodePortIgnitionProxyNetworkPolicy(namespace string) 
*networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "nodeport-ignition-proxy", + }, + } +} + func NodePortKonnectivityNetworkPolicy(namespace string) *networkingv1.NetworkPolicy { return &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ @@ -95,6 +104,15 @@ func NodePortKonnectivityNetworkPolicy(namespace string) *networkingv1.NetworkPo } } +func NodePortKonnectivityKASNetworkPolicy(namespace string) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "nodeport-konnectivity-kas", + }, + } +} + func VirtLauncherNetworkPolicy(namespace string) *networkingv1.NetworkPolicy { return &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ diff --git a/hypershift-operator/controllers/nodepool/haproxy.go b/hypershift-operator/controllers/nodepool/haproxy.go index 412374388f..d78f1cddbb 100644 --- a/hypershift-operator/controllers/nodepool/haproxy.go +++ b/hypershift-operator/controllers/nodepool/haproxy.go @@ -66,7 +66,7 @@ func (r *NodePoolReconciler) reconcileHAProxyIgnitionConfig(ctx context.Context, if util.IsPrivateHC(hcluster) { apiServerExternalAddress = fmt.Sprintf("api.%s.hypershift.local", hcluster.Name) - apiServerExternalPort = 443 + apiServerExternalPort = util.APIPortForLocalZone(util.IsLBKASByHC(hcluster)) } else { if hcluster.Status.KubeConfig == nil { return "", true, nil @@ -121,13 +121,25 @@ func (r *NodePoolReconciler) reconcileHAProxyIgnitionConfig(ctx context.Context, } } var apiserverProxy string + var noProxy string if hcluster.Spec.Configuration != nil && hcluster.Spec.Configuration.Proxy != nil && hcluster.Spec.Configuration.Proxy.HTTPSProxy != "" && util.ConnectsThroughInternetToControlplane(hcluster.Spec.Platform) { apiserverProxy = hcluster.Spec.Configuration.Proxy.HTTPSProxy + noProxy = hcluster.Spec.Configuration.Proxy.NoProxy } machineConfig := 
manifests.MachineConfigAPIServerHAProxy() ignition.SetMachineConfigLabels(machineConfig) - serializedConfig, err := apiServerProxyConfig(haProxyImage, controlPlaneOperatorImage, apiServerExternalAddress, apiServerInternalAddress, apiServerExternalPort, apiServerInternalPort, apiserverProxy) + + // Sanity check, thought this should never be <0 as hcluster.Spec.Networking is defaulted in the API. + var serviceNetworkCIDR, clusterNetworkCIDR string + if len(hcluster.Spec.Networking.ServiceNetwork) > 0 { + serviceNetworkCIDR = hcluster.Spec.Networking.ServiceNetwork[0].CIDR.String() + } + if len(hcluster.Spec.Networking.ClusterNetwork) > 0 { + clusterNetworkCIDR = hcluster.Spec.Networking.ClusterNetwork[0].CIDR.String() + } + + serializedConfig, err := apiServerProxyConfig(haProxyImage, controlPlaneOperatorImage, apiServerExternalAddress, apiServerInternalAddress, apiServerExternalPort, apiServerInternalPort, apiserverProxy, noProxy, serviceNetworkCIDR, clusterNetworkCIDR) if err != nil { return "", true, fmt.Errorf("failed to create apiserver haproxy config: %w", err) } @@ -193,7 +205,7 @@ var ( haProxyConfigTemplate = template.Must(template.New("haProxyConfig").Parse(MustAsset("apiserver-haproxy/haproxy.cfg"))) ) -func apiServerProxyConfig(haProxyImage, cpoImage, externalAPIAddress, internalAPIAddress string, externalAPIPort, internalAPIPort int32, proxyAddr string) ([]byte, error) { +func apiServerProxyConfig(haProxyImage, cpoImage, externalAPIAddress, internalAPIAddress string, externalAPIPort, internalAPIPort int32, proxyAddr, noProxy, serviceNetwork, clusterNetwork string) ([]byte, error) { config := &ignitionapi.Config{} config.Ignition.Version = ignitionapi.MaxVersion.String() @@ -215,7 +227,18 @@ func apiServerProxyConfig(haProxyImage, cpoImage, externalAPIAddress, internalAP }, }, } - if proxyAddr == "" { + + // Check if no proxy contains any address that should result in skipping the system proxy + skipProxyForKAS := func() bool { + for _, s := range 
[]string{internalAPIAddress, "kubernetes", serviceNetwork, clusterNetwork} { + if strings.Contains(noProxy, s) { + return true + } + } + return false + }() + + if proxyAddr == "" || skipProxyForKAS { filesToAdd = append(filesToAdd, []fileToAdd{ { template: haProxyConfigTemplate, diff --git a/hypershift-operator/controllers/nodepool/haproxy_test.go b/hypershift-operator/controllers/nodepool/haproxy_test.go index f7be8ea341..bfb3556020 100644 --- a/hypershift-operator/controllers/nodepool/haproxy_test.go +++ b/hypershift-operator/controllers/nodepool/haproxy_test.go @@ -24,15 +24,55 @@ func TestAPIServerHAProxyConfig(t *testing.T) { image := "ha-proxy-image:latest" externalAddress := "cluster.example.com" internalAddress := "cluster.internal.example.com" - config, err := apiServerProxyConfig(image, "", externalAddress, internalAddress, 443, 8443, "") - if err != nil { - t.Fatalf("unexpected error: %v", err) + serviceNetwork := " 10.134.0.0/16" + clusterNetwork := " 10.128.0.0/14" + + testCases := []struct { + name string + proxy string + noProxy string + }{ + { + name: "when empty proxy it should create an haproxy", + proxy: "", + noProxy: "localhost,127.0.0.1", + }, + { + name: "when noproxy matches internalAddress it should create an haproxy", + proxy: "proxy", + noProxy: "localhost,127.0.0.1," + internalAddress, + }, + { + name: "when noproxy matches serviceNetwork it should create an haproxy", + proxy: "proxy", + noProxy: "localhost," + serviceNetwork + ",127.0.0.1,", + }, + { + name: "when noproxy matches clusterNetwork it should create an haproxy", + proxy: "proxy", + noProxy: "localhost," + clusterNetwork + ",127.0.0.1,", + }, + { + name: "when noproxy matches kubernetes it should create an haproxy", + proxy: "proxy", + noProxy: "localhost,kubernetes.svc,127.0.0.1,", + }, } - yamlConfig, err := yaml.JSONToYAML(config) - if err != nil { - t.Fatalf("cannot convert to yaml: %v", err) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + 
config, err := apiServerProxyConfig(image, tc.proxy, externalAddress, internalAddress, 443, 8443, + tc.proxy, tc.noProxy, serviceNetwork, clusterNetwork) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + yamlConfig, err := yaml.JSONToYAML(config) + if err != nil { + t.Fatalf("cannot convert to yaml: %v", err) + } + testutil.CompareWithFixture(t, yamlConfig) + }) } - testutil.CompareWithFixture(t, yamlConfig) } func TestReconcileHAProxyIgnitionConfig(t *testing.T) { @@ -97,6 +137,22 @@ kind: Config` expectedHAProxyConfigContent: []string{"api." + hc().Name + ".hypershift.local:443"}, }, + { + name: "private cluster uses .local address and LB kas", + hc: hc(func(hc *hyperv1.HostedCluster) { + hc.Spec.Platform.AWS.EndpointAccess = hyperv1.Private + hc.Spec.Networking.ServiceNetwork = []hyperv1.ServiceNetworkEntry{{CIDR: *ipnet.MustParseCIDR("192.168.1.0/24")}} + hc.Spec.Services = []hyperv1.ServicePublishingStrategyMapping{ + { + Service: hyperv1.APIServer, + ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{ + Type: hyperv1.LoadBalancer, + }, + }, + } + }), + expectedHAProxyConfigContent: []string{"api." + hc().Name + ".hypershift.local:6443"}, + }, { name: "public and private cluster uses .local address", hc: hc(func(hc *hyperv1.HostedCluster) { @@ -116,6 +172,22 @@ kind: Config` expectedHAProxyConfigContent: []string{"api." + hc().Name + ".hypershift.local:443"}, }, + { + name: "public and private cluster uses .local address and LB kas", + hc: hc(func(hc *hyperv1.HostedCluster) { + hc.Spec.Platform.AWS.EndpointAccess = hyperv1.PublicAndPrivate + hc.Spec.Networking.ServiceNetwork = []hyperv1.ServiceNetworkEntry{{CIDR: *ipnet.MustParseCIDR("192.168.1.0/24")}} + hc.Spec.Services = []hyperv1.ServicePublishingStrategyMapping{ + { + Service: hyperv1.APIServer, + ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{ + Type: hyperv1.LoadBalancer, + }, + }, + } + }), + expectedHAProxyConfigContent: []string{"api." 
+ hc().Name + ".hypershift.local:6443"}, + }, { name: "public cluster uses address from kubeconfig", hc: hc(func(hc *hyperv1.HostedCluster) { diff --git a/hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go b/hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go index f05c7a6b6f..bd3dc4e0b5 100644 --- a/hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go +++ b/hypershift-operator/controllers/nodepool/kubevirt/kubevirt.go @@ -267,6 +267,10 @@ func virtualMachineTemplateBase(nodePool *hyperv1.NodePool, bootImage BootImage) template.Spec.Template.Spec.Domain.Devices.NetworkInterfaceMultiQueue = ptr.To(true) } + if kvPlatform.NodeSelector != nil && len(kvPlatform.NodeSelector) > 0 { + template.Spec.Template.Spec.NodeSelector = kvPlatform.NodeSelector + } + return template } @@ -394,7 +398,7 @@ func applyJsonPatches(nodePool *hyperv1.NodePool, hcluster *hyperv1.HostedCluste return nil } - //tmplt.Spec.Template.Spec.Networks[0].Multus.NetworkName + // tmplt.Spec.Template.Spec.Networks[0].Multus.NetworkName buff := &bytes.Buffer{} dec := json.NewEncoder(buff) err := dec.Encode(tmplt) diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller.go b/hypershift-operator/controllers/nodepool/nodepool_controller.go index ceefbda45d..67c7a059f8 100644 --- a/hypershift-operator/controllers/nodepool/nodepool_controller.go +++ b/hypershift-operator/controllers/nodepool/nodepool_controller.go @@ -60,6 +60,7 @@ import ( "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -87,6 +88,7 @@ const ( TokenSecretReleaseKey = "release" TokenSecretTokenKey = "token" TokenSecretPullSecretHashKey = "pull-secret-hash" + TokenSecretHCConfigurationHashKey = "hc-configuration-hash" TokenSecretConfigKey = "config" 
TokenSecretAnnotation = "hypershift.openshift.io/ignition-config" TokenSecretIgnitionReachedAnnotation = "hypershift.openshift.io/ignition-reached" @@ -125,18 +127,18 @@ type CPOCapabilities struct { func (r *NodePoolReconciler) SetupWithManager(mgr ctrl.Manager) error { controller, err := ctrl.NewControllerManagedBy(mgr). - For(&hyperv1.NodePool{}). + For(&hyperv1.NodePool{}, builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). // We want to reconcile when the HostedCluster IgnitionEndpoint is available. - Watches(&hyperv1.HostedCluster{}, handler.EnqueueRequestsFromMapFunc(r.enqueueNodePoolsForHostedCluster)). - Watches(&capiv1.MachineDeployment{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool)). - Watches(&capiv1.MachineSet{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool)). - Watches(&capiaws.AWSMachineTemplate{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool)). - Watches(&agentv1.AgentMachineTemplate{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool)). - Watches(&capiazure.AzureMachineTemplate{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool)). + Watches(&hyperv1.HostedCluster{}, handler.EnqueueRequestsFromMapFunc(r.enqueueNodePoolsForHostedCluster), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). + Watches(&capiv1.MachineDeployment{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). + Watches(&capiv1.MachineSet{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). + Watches(&capiaws.AWSMachineTemplate{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). 
+ Watches(&agentv1.AgentMachineTemplate{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). + Watches(&capiazure.AzureMachineTemplate{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). // We want to reconcile when the user data Secret or the token Secret is unexpectedly changed out of band. - Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool)). + Watches(&corev1.Secret{}, handler.EnqueueRequestsFromMapFunc(enqueueParentNodePool), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). // We want to reconcile when the ConfigMaps referenced by the spec.config and also the core ones change. - Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.enqueueNodePoolsForConfig)). + Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.enqueueNodePoolsForConfig), builder.WithPredicates(supportutil.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). WithOptions(controller.Options{ RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 10*time.Second), MaxConcurrentReconciles: 10, @@ -254,6 +256,23 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho proxy := globalconfig.ProxyConfig() globalconfig.ReconcileProxyConfigWithStatusFromHostedCluster(proxy, hcluster) + // NOTE: The image global config is not injected via userdata or NodePool ignition config. + // It is included directly by the ignition server. However, we need to detect the change + // here to trigger a nodepool update. + image := globalconfig.ImageConfig() + globalconfig.ReconcileImageConfigFromHostedCluster(image, hcluster) + + // Serialize proxy and image into a single string to use in the token secret hash. 
+ globalConfigBytes := bytes.NewBuffer(nil) + enc := json.NewEncoder(globalConfigBytes) + if err := enc.Encode(proxy); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to encode proxy global config: %w", err) + } + if err := enc.Encode(image); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to encode image global config: %w", err) + } + globalConfig := globalConfigBytes.String() + // Validate autoscaling input. if err := validateAutoscaling(nodePool); err != nil { SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{ @@ -589,7 +608,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho } // Check if config needs to be updated. - targetConfigHash := supportutil.HashStruct(config + pullSecretName) + targetConfigHash := supportutil.HashSimple(config + pullSecretName) isUpdatingConfig := isUpdatingConfig(nodePool, targetConfigHash) if isUpdatingConfig { SetStatusCondition(&nodePool.Status.Conditions, hyperv1.NodePoolCondition{ @@ -624,7 +643,7 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho } // Signal ignition payload generation - targetPayloadConfigHash := supportutil.HashStruct(config + targetVersion + pullSecretName) + targetPayloadConfigHash := supportutil.HashSimple(config + targetVersion + pullSecretName + globalConfig) tokenSecret := TokenSecret(controlPlaneNamespace, nodePool.Name, targetPayloadConfigHash) condition, err := r.createValidGeneratedPayloadCondition(ctx, tokenSecret, nodePool.Generation) if err != nil { @@ -730,6 +749,10 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho status = corev1.ConditionFalse reason = hyperv1.NodePoolNotFoundReason message = "No Machines are created" + if nodePool.Spec.Replicas != nil && *nodePool.Spec.Replicas == 0 { + reason = hyperv1.AsExpectedReason + message = "NodePool set to no replicas" + } } // Aggregate conditions. 
@@ -779,6 +802,10 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho status = corev1.ConditionFalse reason = hyperv1.NodePoolNotFoundReason message = "No Machines are created" + if nodePool.Spec.Replicas != nil && *nodePool.Spec.Replicas == 0 { + reason = hyperv1.AsExpectedReason + message = "NodePool set to no replicas" + } } for _, machine := range machines { @@ -859,8 +886,12 @@ func (r *NodePoolReconciler) reconcile(ctx context.Context, hcluster *hyperv1.Ho if err != nil { return ctrl.Result{}, fmt.Errorf("failed to get pull secret bytes: %w", err) } + hcConfigurationHash, err := supportutil.HashStruct(hcluster.Spec.Configuration) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to hash HostedCluster configuration: %w", err) + } if result, err := r.CreateOrUpdate(ctx, r.Client, tokenSecret, func() error { - return reconcileTokenSecret(tokenSecret, nodePool, compressedConfig.Bytes(), pullSecretBytes) + return reconcileTokenSecret(tokenSecret, nodePool, compressedConfig.Bytes(), pullSecretBytes, hcConfigurationHash) }); err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile token Secret: %w", err) } else { @@ -1445,7 +1476,7 @@ func reconcileTuningConfigMap(tuningConfigMap *corev1.ConfigMap, nodePool *hyper return nil } -func reconcileTokenSecret(tokenSecret *corev1.Secret, nodePool *hyperv1.NodePool, compressedConfig []byte, pullSecret []byte) error { +func reconcileTokenSecret(tokenSecret *corev1.Secret, nodePool *hyperv1.NodePool, compressedConfig []byte, pullSecret []byte, hcConfigurationHash string) error { // The token secret controller updates expired token IDs for token Secrets. // When that happens the NodePool controller reconciles the userData Secret with the new token ID. // Therefore, this secret is mutable. 
@@ -1466,7 +1497,8 @@ func reconcileTokenSecret(tokenSecret *corev1.Secret, nodePool *hyperv1.NodePool tokenSecret.Data[TokenSecretTokenKey] = []byte(uuid.New().String()) tokenSecret.Data[TokenSecretReleaseKey] = []byte(nodePool.Spec.Release.Image) tokenSecret.Data[TokenSecretConfigKey] = compressedConfig - tokenSecret.Data[TokenSecretPullSecretHashKey] = []byte(supportutil.HashStruct(pullSecret)) + tokenSecret.Data[TokenSecretPullSecretHashKey] = []byte(supportutil.HashSimple(pullSecret)) + tokenSecret.Data[TokenSecretHCConfigurationHashKey] = []byte(hcConfigurationHash) } return nil } @@ -1710,6 +1742,15 @@ func (r *NodePoolReconciler) reconcileMachineHealthCheck(mhc *capiv1.MachineHeal // https://github.com/openshift/managed-cluster-config/blob/14d4255ec75dc263ffd3d897dfccc725cb2b7072/deploy/osd-machine-api/011-machine-api.srep-worker-healthcheck.MachineHealthCheck.yaml // TODO (alberto): possibly expose this config at the nodePool API. maxUnhealthy := intstr.FromInt(2) + var timeOut time.Duration + + switch nodePool.Spec.Platform.Type { + case hyperv1.AgentPlatform, hyperv1.NonePlatform: + timeOut = 16 * time.Minute + default: + timeOut = 8 * time.Minute + } + resourcesName := generateName(CAPIClusterName, nodePool.Spec.ClusterName, nodePool.GetName()) mhc.Spec = capiv1.MachineHealthCheckSpec{ ClusterName: CAPIClusterName, @@ -1723,14 +1764,14 @@ func (r *NodePoolReconciler) reconcileMachineHealthCheck(mhc *capiv1.MachineHeal Type: corev1.NodeReady, Status: corev1.ConditionFalse, Timeout: metav1.Duration{ - Duration: 8 * time.Minute, + Duration: timeOut, }, }, { Type: corev1.NodeReady, Status: corev1.ConditionUnknown, Timeout: metav1.Duration{ - Duration: 8 * time.Minute, + Duration: timeOut, }, }, }, @@ -2027,12 +2068,12 @@ func defaultAndValidateConfigManifest(manifest []byte) ([]byte, error) { _ = v1alpha1.Install(scheme) _ = configv1.Install(scheme) - YamlSerializer := serializer.NewSerializerWithOptions( + yamlSerializer := 
serializer.NewSerializerWithOptions( serializer.DefaultMetaFactory, scheme, scheme, serializer.SerializerOptions{Yaml: true, Pretty: true, Strict: false}, ) - cr, _, err := YamlSerializer.Decode(manifest, nil, nil) + cr, _, err := yamlSerializer.Decode(manifest, nil, nil) if err != nil { return nil, fmt.Errorf("error decoding config: %w", err) } @@ -2044,14 +2085,34 @@ func defaultAndValidateConfigManifest(manifest []byte) ([]byte, error) { } obj.Labels["machineconfiguration.openshift.io/role"] = "worker" buff := bytes.Buffer{} - if err := YamlSerializer.Encode(obj, &buff); err != nil { + if err := yamlSerializer.Encode(obj, &buff); err != nil { return nil, fmt.Errorf("failed to encode config after defaulting it: %w", err) } manifest = buff.Bytes() case *v1alpha1.ImageContentSourcePolicy: case *configv1.ImageDigestMirrorSet: case *mcfgv1.KubeletConfig: + obj.Spec.MachineConfigPoolSelector = &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "machineconfiguration.openshift.io/mco-built-in": "", + }, + } + buff := bytes.Buffer{} + if err := yamlSerializer.Encode(obj, &buff); err != nil { + return nil, fmt.Errorf("failed to encode kubelet config after setting built-in MCP selector: %w", err) + } + manifest = buff.Bytes() case *mcfgv1.ContainerRuntimeConfig: + obj.Spec.MachineConfigPoolSelector = &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "machineconfiguration.openshift.io/mco-built-in": "", + }, + } + buff := bytes.Buffer{} + if err := yamlSerializer.Encode(obj, &buff); err != nil { + return nil, fmt.Errorf("failed to encode container runtime config after setting built-in MCP selector: %w", err) + } + manifest = buff.Bytes() default: return nil, fmt.Errorf("unsupported config type: %T", obj) } @@ -2410,13 +2471,13 @@ func getName(base, suffix string, maxLength int) string { if baseLength < 1 { prefix := base[0:min(len(base), max(0, maxLength-9))] // Calculate hash on initial base-suffix string - shortName := fmt.Sprintf("%s-%s", prefix, 
supportutil.HashStruct(name)) + shortName := fmt.Sprintf("%s-%s", prefix, supportutil.HashSimple(name)) return shortName[:min(maxLength, len(shortName))] } prefix := base[0:baseLength] // Calculate hash on initial base-suffix string - return fmt.Sprintf("%s-%s-%s", prefix, supportutil.HashStruct(base), suffix) + return fmt.Sprintf("%s-%s-%s", prefix, supportutil.HashSimple(base), suffix) } // max returns the greater of its 2 inputs @@ -2560,7 +2621,7 @@ func machineTemplateBuilders(hcluster *hyperv1.HostedCluster, nodePool *hyperv1. func generateMachineTemplateName(nodePool *hyperv1.NodePool, machineTemplateSpecJSON []byte) string { // using HashStruct(machineTemplateSpecJSON) ensures a rolling upgrade is triggered // by creating a new template with a differnt name if any field changes. - return getName(nodePool.GetName(), supportutil.HashStruct(machineTemplateSpecJSON), + return getName(nodePool.GetName(), supportutil.HashSimple(machineTemplateSpecJSON), validation.DNS1123SubdomainMaxLength) } diff --git a/hypershift-operator/controllers/nodepool/nodepool_controller_test.go b/hypershift-operator/controllers/nodepool/nodepool_controller_test.go index 0ac5216e2b..2c3bae97b3 100644 --- a/hypershift-operator/controllers/nodepool/nodepool_controller_test.go +++ b/hypershift-operator/controllers/nodepool/nodepool_controller_test.go @@ -410,6 +410,20 @@ spec: pools.operator.machineconfiguration.openshift.io/worker: "" kubeletConfig: maxPods: 100 +` + kubeletConfig1Defaulted := `apiVersion: machineconfiguration.openshift.io/v1 +kind: KubeletConfig +metadata: + creationTimestamp: null + name: set-max-pods +spec: + kubeletConfig: + maxPods: 100 + machineConfigPoolSelector: + matchLabels: + machineconfiguration.openshift.io/mco-built-in: "" +status: + conditions: null ` haproxyIgnititionConfig := `apiVersion: machineconfiguration.openshift.io/v1 kind: MachineConfig @@ -469,6 +483,31 @@ spec: kernelType: "" osImageURL: "" ` + containerRuntimeConfig1 := `apiVersion: 
machineconfiguration.openshift.io/v1 +kind: ContainerRuntimeConfig +metadata: + name: set-pids-limit +spec: + containerRuntimeConfig: + pidsLimit: 2048 +` + + containerRuntimeConfig1Defaulted := `apiVersion: machineconfiguration.openshift.io/v1 +kind: ContainerRuntimeConfig +metadata: + creationTimestamp: null + name: set-pids-limit +spec: + containerRuntimeConfig: + logSizeMax: "0" + overlaySize: "0" + pidsLimit: 2048 + machineConfigPoolSelector: + matchLabels: + machineconfiguration.openshift.io/mco-built-in: "" +status: + conditions: null +` namespace := "test" testCases := []struct { @@ -572,7 +611,35 @@ spec: error: true, }, { - name: "fails if a non supported config kind", + name: "gets a single valid ContainerRuntimeConfig", + nodePool: &hyperv1.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + }, + Spec: hyperv1.NodePoolSpec{ + Config: []corev1.LocalObjectReference{ + { + Name: "containerRuntimeConfig-1", + }, + }, + }, + }, + config: []client.Object{ + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "containerRuntimeConfig-1", + Namespace: namespace, + }, + Data: map[string]string{ + TokenSecretConfigKey: containerRuntimeConfig1, + }, + }, + }, + expect: containerRuntimeConfig1Defaulted, + error: false, + }, + { + name: "gets a single valid KubeletConfig", nodePool: &hyperv1.NodePool{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, @@ -596,7 +663,7 @@ spec: }, }, }, - expect: kubeletConfig1, + expect: kubeletConfig1Defaulted, error: false, }, { diff --git a/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig.yaml b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_empty_proxy_it_should_create_an_haproxy.yaml similarity index 100% rename from hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig.yaml rename to 
hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_empty_proxy_it_should_create_an_haproxy.yaml diff --git a/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_clusterNetwork_it_should_create_an_haproxy.yaml b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_clusterNetwork_it_should_create_an_haproxy.yaml new file mode 100644 index 0000000000..6a6b696d5d --- /dev/null +++ b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_clusterNetwork_it_should_create_an_haproxy.yaml @@ -0,0 +1,42 @@ +ignition: + version: 3.2.0 +storage: + files: + - contents: + source: data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaApzZXQgLXgKaXAgYWRkciBhZGQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBicmQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbSBzY29wZSBob3N0IGRldiBsbwppcCByb3V0ZSBhZGQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBkZXYgbG8gc2NvcGUgbGluayBzcmMgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbQo= + mode: 493 + overwrite: true + path: /usr/local/bin/setup-apiserver-ip.sh + - contents: + source: data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaApzZXQgLXgKaXAgYWRkciBkZWxldGUgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBkZXYgbG8KaXAgcm91dGUgZGVsIGNsdXN0ZXIuaW50ZXJuYWwuZXhhbXBsZS5jb20vMzIgZGV2IGxvIHNjb3BlIGxpbmsgc3JjIGNsdXN0ZXIuaW50ZXJuYWwuZXhhbXBsZS5jb20K + mode: 493 + overwrite: true + path: /usr/local/bin/teardown-apiserver-ip.sh + - contents: + source: 
data:text/plain;charset=utf-8;base64,Z2xvYmFsCiAgbWF4Y29ubiA3MDAwCiAgbG9nIHN0ZG91dCBsb2NhbDAKICBsb2cgc3Rkb3V0IGxvY2FsMSBub3RpY2UKCmRlZmF1bHRzCiAgbW9kZSB0Y3AKICB0aW1lb3V0IGNsaWVudCAxMG0KICB0aW1lb3V0IHNlcnZlciAxMG0KICB0aW1lb3V0IGNvbm5lY3QgMTBzCiAgdGltZW91dCBjbGllbnQtZmluIDVzCiAgdGltZW91dCBzZXJ2ZXItZmluIDVzCiAgdGltZW91dCBxdWV1ZSA1cwogIHJldHJpZXMgMwoKZnJvbnRlbmQgbG9jYWxfYXBpc2VydmVyCiAgYmluZCBjbHVzdGVyLmludGVybmFsLmV4YW1wbGUuY29tOjg0NDMKICBsb2cgZ2xvYmFsCiAgbW9kZSB0Y3AKICBvcHRpb24gdGNwbG9nCiAgZGVmYXVsdF9iYWNrZW5kIHJlbW90ZV9hcGlzZXJ2ZXIKCmJhY2tlbmQgcmVtb3RlX2FwaXNlcnZlcgogIG1vZGUgdGNwCiAgbG9nIGdsb2JhbAogIG9wdGlvbiBodHRwY2hrIEdFVCAvdmVyc2lvbgogIG9wdGlvbiBsb2ctaGVhbHRoLWNoZWNrcwogIGRlZmF1bHQtc2VydmVyIGludGVyIDEwcyBmYWxsIDMgcmlzZSAzCiAgc2VydmVyIGNvbnRyb2xwbGFuZSBjbHVzdGVyLmV4YW1wbGUuY29tOjQ0Mwo= + mode: 420 + overwrite: true + path: /etc/kubernetes/apiserver-proxy-config/haproxy.cfg + - contents: + source: data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjogdjEKa2luZDogUG9kCm1ldGFkYXRhOgogIGNyZWF0aW9uVGltZXN0YW1wOiBudWxsCiAgbGFiZWxzOgogICAgazhzLWFwcDoga3ViZS1hcGlzZXJ2ZXItcHJveHkKICBuYW1lOiBrdWJlLWFwaXNlcnZlci1wcm94eQogIG5hbWVzcGFjZToga3ViZS1zeXN0ZW0Kc3BlYzoKICBjb250YWluZXJzOgogIC0gY29tbWFuZDoKICAgIC0gaGFwcm94eQogICAgLSAtZgogICAgLSAvdXNyL2xvY2FsL2V0Yy9oYXByb3h5CiAgICBpbWFnZTogaGEtcHJveHktaW1hZ2U6bGF0ZXN0CiAgICBsaXZlbmVzc1Byb2JlOgogICAgICBmYWlsdXJlVGhyZXNob2xkOiAzCiAgICAgIGh0dHBHZXQ6CiAgICAgICAgaG9zdDogY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbQogICAgICAgIHBhdGg6IC92ZXJzaW9uCiAgICAgICAgcG9ydDogODQ0MwogICAgICAgIHNjaGVtZTogSFRUUFMKICAgICAgaW5pdGlhbERlbGF5U2Vjb25kczogMTIwCiAgICAgIHBlcmlvZFNlY29uZHM6IDEyMAogICAgICBzdWNjZXNzVGhyZXNob2xkOiAxCiAgICBuYW1lOiBoYXByb3h5CiAgICBwb3J0czoKICAgIC0gY29udGFpbmVyUG9ydDogODQ0MwogICAgICBob3N0UG9ydDogODQ0MwogICAgICBuYW1lOiBhcGlzZXJ2ZXIKICAgICAgcHJvdG9jb2w6IFRDUAogICAgcmVzb3VyY2VzOgogICAgICByZXF1ZXN0czoKICAgICAgICBjcHU6IDEzbQogICAgICAgIG1lbW9yeTogMTZNaQogICAgc2VjdXJpdHlDb250ZXh0OgogICAgICBydW5Bc1VzZXI6IDEwMDEKICAgIHZvbHVtZU1vdW50czoKICAgIC0gbW91bnRQYXRo
OiAvdXNyL2xvY2FsL2V0Yy9oYXByb3h5CiAgICAgIG5hbWU6IGNvbmZpZwogIGhvc3ROZXR3b3JrOiB0cnVlCiAgcHJpb3JpdHlDbGFzc05hbWU6IHN5c3RlbS1ub2RlLWNyaXRpY2FsCiAgdm9sdW1lczoKICAtIGhvc3RQYXRoOgogICAgICBwYXRoOiAvZXRjL2t1YmVybmV0ZXMvYXBpc2VydmVyLXByb3h5LWNvbmZpZwogICAgbmFtZTogY29uZmlnCnN0YXR1czoge30K + mode: 420 + overwrite: true + path: /etc/kubernetes/manifests/kube-apiserver-proxy.yaml +systemd: + units: + - contents: | + [Unit] + Description=Sets up local IP to proxy API server requests + Wants=network-online.target + After=network-online.target + + [Service] + Type=oneshot + ExecStart=/usr/local/bin/setup-apiserver-ip.sh + ExecStop=/usr/local/bin/teardown-apiserver-ip.sh + RemainAfterExit=yes + + [Install] + WantedBy=multi-user.target + enabled: true + name: apiserver-ip.service diff --git a/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_internalAddress_it_should_create_an_haproxy.yaml b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_internalAddress_it_should_create_an_haproxy.yaml new file mode 100644 index 0000000000..6a6b696d5d --- /dev/null +++ b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_internalAddress_it_should_create_an_haproxy.yaml @@ -0,0 +1,42 @@ +ignition: + version: 3.2.0 +storage: + files: + - contents: + source: data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaApzZXQgLXgKaXAgYWRkciBhZGQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBicmQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbSBzY29wZSBob3N0IGRldiBsbwppcCByb3V0ZSBhZGQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBkZXYgbG8gc2NvcGUgbGluayBzcmMgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbQo= + mode: 493 + overwrite: true + path: /usr/local/bin/setup-apiserver-ip.sh + - contents: + source: 
data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaApzZXQgLXgKaXAgYWRkciBkZWxldGUgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBkZXYgbG8KaXAgcm91dGUgZGVsIGNsdXN0ZXIuaW50ZXJuYWwuZXhhbXBsZS5jb20vMzIgZGV2IGxvIHNjb3BlIGxpbmsgc3JjIGNsdXN0ZXIuaW50ZXJuYWwuZXhhbXBsZS5jb20K + mode: 493 + overwrite: true + path: /usr/local/bin/teardown-apiserver-ip.sh + - contents: + source: data:text/plain;charset=utf-8;base64,Z2xvYmFsCiAgbWF4Y29ubiA3MDAwCiAgbG9nIHN0ZG91dCBsb2NhbDAKICBsb2cgc3Rkb3V0IGxvY2FsMSBub3RpY2UKCmRlZmF1bHRzCiAgbW9kZSB0Y3AKICB0aW1lb3V0IGNsaWVudCAxMG0KICB0aW1lb3V0IHNlcnZlciAxMG0KICB0aW1lb3V0IGNvbm5lY3QgMTBzCiAgdGltZW91dCBjbGllbnQtZmluIDVzCiAgdGltZW91dCBzZXJ2ZXItZmluIDVzCiAgdGltZW91dCBxdWV1ZSA1cwogIHJldHJpZXMgMwoKZnJvbnRlbmQgbG9jYWxfYXBpc2VydmVyCiAgYmluZCBjbHVzdGVyLmludGVybmFsLmV4YW1wbGUuY29tOjg0NDMKICBsb2cgZ2xvYmFsCiAgbW9kZSB0Y3AKICBvcHRpb24gdGNwbG9nCiAgZGVmYXVsdF9iYWNrZW5kIHJlbW90ZV9hcGlzZXJ2ZXIKCmJhY2tlbmQgcmVtb3RlX2FwaXNlcnZlcgogIG1vZGUgdGNwCiAgbG9nIGdsb2JhbAogIG9wdGlvbiBodHRwY2hrIEdFVCAvdmVyc2lvbgogIG9wdGlvbiBsb2ctaGVhbHRoLWNoZWNrcwogIGRlZmF1bHQtc2VydmVyIGludGVyIDEwcyBmYWxsIDMgcmlzZSAzCiAgc2VydmVyIGNvbnRyb2xwbGFuZSBjbHVzdGVyLmV4YW1wbGUuY29tOjQ0Mwo= + mode: 420 + overwrite: true + path: /etc/kubernetes/apiserver-proxy-config/haproxy.cfg + - contents: + source: 
data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjogdjEKa2luZDogUG9kCm1ldGFkYXRhOgogIGNyZWF0aW9uVGltZXN0YW1wOiBudWxsCiAgbGFiZWxzOgogICAgazhzLWFwcDoga3ViZS1hcGlzZXJ2ZXItcHJveHkKICBuYW1lOiBrdWJlLWFwaXNlcnZlci1wcm94eQogIG5hbWVzcGFjZToga3ViZS1zeXN0ZW0Kc3BlYzoKICBjb250YWluZXJzOgogIC0gY29tbWFuZDoKICAgIC0gaGFwcm94eQogICAgLSAtZgogICAgLSAvdXNyL2xvY2FsL2V0Yy9oYXByb3h5CiAgICBpbWFnZTogaGEtcHJveHktaW1hZ2U6bGF0ZXN0CiAgICBsaXZlbmVzc1Byb2JlOgogICAgICBmYWlsdXJlVGhyZXNob2xkOiAzCiAgICAgIGh0dHBHZXQ6CiAgICAgICAgaG9zdDogY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbQogICAgICAgIHBhdGg6IC92ZXJzaW9uCiAgICAgICAgcG9ydDogODQ0MwogICAgICAgIHNjaGVtZTogSFRUUFMKICAgICAgaW5pdGlhbERlbGF5U2Vjb25kczogMTIwCiAgICAgIHBlcmlvZFNlY29uZHM6IDEyMAogICAgICBzdWNjZXNzVGhyZXNob2xkOiAxCiAgICBuYW1lOiBoYXByb3h5CiAgICBwb3J0czoKICAgIC0gY29udGFpbmVyUG9ydDogODQ0MwogICAgICBob3N0UG9ydDogODQ0MwogICAgICBuYW1lOiBhcGlzZXJ2ZXIKICAgICAgcHJvdG9jb2w6IFRDUAogICAgcmVzb3VyY2VzOgogICAgICByZXF1ZXN0czoKICAgICAgICBjcHU6IDEzbQogICAgICAgIG1lbW9yeTogMTZNaQogICAgc2VjdXJpdHlDb250ZXh0OgogICAgICBydW5Bc1VzZXI6IDEwMDEKICAgIHZvbHVtZU1vdW50czoKICAgIC0gbW91bnRQYXRoOiAvdXNyL2xvY2FsL2V0Yy9oYXByb3h5CiAgICAgIG5hbWU6IGNvbmZpZwogIGhvc3ROZXR3b3JrOiB0cnVlCiAgcHJpb3JpdHlDbGFzc05hbWU6IHN5c3RlbS1ub2RlLWNyaXRpY2FsCiAgdm9sdW1lczoKICAtIGhvc3RQYXRoOgogICAgICBwYXRoOiAvZXRjL2t1YmVybmV0ZXMvYXBpc2VydmVyLXByb3h5LWNvbmZpZwogICAgbmFtZTogY29uZmlnCnN0YXR1czoge30K + mode: 420 + overwrite: true + path: /etc/kubernetes/manifests/kube-apiserver-proxy.yaml +systemd: + units: + - contents: | + [Unit] + Description=Sets up local IP to proxy API server requests + Wants=network-online.target + After=network-online.target + + [Service] + Type=oneshot + ExecStart=/usr/local/bin/setup-apiserver-ip.sh + ExecStop=/usr/local/bin/teardown-apiserver-ip.sh + RemainAfterExit=yes + + [Install] + WantedBy=multi-user.target + enabled: true + name: apiserver-ip.service diff --git 
a/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_kubernetes_it_should_create_an_haproxy.yaml b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_kubernetes_it_should_create_an_haproxy.yaml new file mode 100644 index 0000000000..6a6b696d5d --- /dev/null +++ b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_kubernetes_it_should_create_an_haproxy.yaml @@ -0,0 +1,42 @@ +ignition: + version: 3.2.0 +storage: + files: + - contents: + source: data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaApzZXQgLXgKaXAgYWRkciBhZGQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBicmQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbSBzY29wZSBob3N0IGRldiBsbwppcCByb3V0ZSBhZGQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBkZXYgbG8gc2NvcGUgbGluayBzcmMgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbQo= + mode: 493 + overwrite: true + path: /usr/local/bin/setup-apiserver-ip.sh + - contents: + source: data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaApzZXQgLXgKaXAgYWRkciBkZWxldGUgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBkZXYgbG8KaXAgcm91dGUgZGVsIGNsdXN0ZXIuaW50ZXJuYWwuZXhhbXBsZS5jb20vMzIgZGV2IGxvIHNjb3BlIGxpbmsgc3JjIGNsdXN0ZXIuaW50ZXJuYWwuZXhhbXBsZS5jb20K + mode: 493 + overwrite: true + path: /usr/local/bin/teardown-apiserver-ip.sh + - contents: + source: 
data:text/plain;charset=utf-8;base64,Z2xvYmFsCiAgbWF4Y29ubiA3MDAwCiAgbG9nIHN0ZG91dCBsb2NhbDAKICBsb2cgc3Rkb3V0IGxvY2FsMSBub3RpY2UKCmRlZmF1bHRzCiAgbW9kZSB0Y3AKICB0aW1lb3V0IGNsaWVudCAxMG0KICB0aW1lb3V0IHNlcnZlciAxMG0KICB0aW1lb3V0IGNvbm5lY3QgMTBzCiAgdGltZW91dCBjbGllbnQtZmluIDVzCiAgdGltZW91dCBzZXJ2ZXItZmluIDVzCiAgdGltZW91dCBxdWV1ZSA1cwogIHJldHJpZXMgMwoKZnJvbnRlbmQgbG9jYWxfYXBpc2VydmVyCiAgYmluZCBjbHVzdGVyLmludGVybmFsLmV4YW1wbGUuY29tOjg0NDMKICBsb2cgZ2xvYmFsCiAgbW9kZSB0Y3AKICBvcHRpb24gdGNwbG9nCiAgZGVmYXVsdF9iYWNrZW5kIHJlbW90ZV9hcGlzZXJ2ZXIKCmJhY2tlbmQgcmVtb3RlX2FwaXNlcnZlcgogIG1vZGUgdGNwCiAgbG9nIGdsb2JhbAogIG9wdGlvbiBodHRwY2hrIEdFVCAvdmVyc2lvbgogIG9wdGlvbiBsb2ctaGVhbHRoLWNoZWNrcwogIGRlZmF1bHQtc2VydmVyIGludGVyIDEwcyBmYWxsIDMgcmlzZSAzCiAgc2VydmVyIGNvbnRyb2xwbGFuZSBjbHVzdGVyLmV4YW1wbGUuY29tOjQ0Mwo= + mode: 420 + overwrite: true + path: /etc/kubernetes/apiserver-proxy-config/haproxy.cfg + - contents: + source: data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjogdjEKa2luZDogUG9kCm1ldGFkYXRhOgogIGNyZWF0aW9uVGltZXN0YW1wOiBudWxsCiAgbGFiZWxzOgogICAgazhzLWFwcDoga3ViZS1hcGlzZXJ2ZXItcHJveHkKICBuYW1lOiBrdWJlLWFwaXNlcnZlci1wcm94eQogIG5hbWVzcGFjZToga3ViZS1zeXN0ZW0Kc3BlYzoKICBjb250YWluZXJzOgogIC0gY29tbWFuZDoKICAgIC0gaGFwcm94eQogICAgLSAtZgogICAgLSAvdXNyL2xvY2FsL2V0Yy9oYXByb3h5CiAgICBpbWFnZTogaGEtcHJveHktaW1hZ2U6bGF0ZXN0CiAgICBsaXZlbmVzc1Byb2JlOgogICAgICBmYWlsdXJlVGhyZXNob2xkOiAzCiAgICAgIGh0dHBHZXQ6CiAgICAgICAgaG9zdDogY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbQogICAgICAgIHBhdGg6IC92ZXJzaW9uCiAgICAgICAgcG9ydDogODQ0MwogICAgICAgIHNjaGVtZTogSFRUUFMKICAgICAgaW5pdGlhbERlbGF5U2Vjb25kczogMTIwCiAgICAgIHBlcmlvZFNlY29uZHM6IDEyMAogICAgICBzdWNjZXNzVGhyZXNob2xkOiAxCiAgICBuYW1lOiBoYXByb3h5CiAgICBwb3J0czoKICAgIC0gY29udGFpbmVyUG9ydDogODQ0MwogICAgICBob3N0UG9ydDogODQ0MwogICAgICBuYW1lOiBhcGlzZXJ2ZXIKICAgICAgcHJvdG9jb2w6IFRDUAogICAgcmVzb3VyY2VzOgogICAgICByZXF1ZXN0czoKICAgICAgICBjcHU6IDEzbQogICAgICAgIG1lbW9yeTogMTZNaQogICAgc2VjdXJpdHlDb250ZXh0OgogICAgICBydW5Bc1VzZXI6IDEwMDEKICAgIHZvbHVtZU1vdW50czoKICAgIC0gbW91bnRQYXRo
OiAvdXNyL2xvY2FsL2V0Yy9oYXByb3h5CiAgICAgIG5hbWU6IGNvbmZpZwogIGhvc3ROZXR3b3JrOiB0cnVlCiAgcHJpb3JpdHlDbGFzc05hbWU6IHN5c3RlbS1ub2RlLWNyaXRpY2FsCiAgdm9sdW1lczoKICAtIGhvc3RQYXRoOgogICAgICBwYXRoOiAvZXRjL2t1YmVybmV0ZXMvYXBpc2VydmVyLXByb3h5LWNvbmZpZwogICAgbmFtZTogY29uZmlnCnN0YXR1czoge30K + mode: 420 + overwrite: true + path: /etc/kubernetes/manifests/kube-apiserver-proxy.yaml +systemd: + units: + - contents: | + [Unit] + Description=Sets up local IP to proxy API server requests + Wants=network-online.target + After=network-online.target + + [Service] + Type=oneshot + ExecStart=/usr/local/bin/setup-apiserver-ip.sh + ExecStop=/usr/local/bin/teardown-apiserver-ip.sh + RemainAfterExit=yes + + [Install] + WantedBy=multi-user.target + enabled: true + name: apiserver-ip.service diff --git a/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_serviceNetwork_it_should_create_an_haproxy.yaml b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_serviceNetwork_it_should_create_an_haproxy.yaml new file mode 100644 index 0000000000..6a6b696d5d --- /dev/null +++ b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestAPIServerHAProxyConfig_when_noproxy_matches_serviceNetwork_it_should_create_an_haproxy.yaml @@ -0,0 +1,42 @@ +ignition: + version: 3.2.0 +storage: + files: + - contents: + source: data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaApzZXQgLXgKaXAgYWRkciBhZGQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBicmQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbSBzY29wZSBob3N0IGRldiBsbwppcCByb3V0ZSBhZGQgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBkZXYgbG8gc2NvcGUgbGluayBzcmMgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbQo= + mode: 493 + overwrite: true + path: /usr/local/bin/setup-apiserver-ip.sh + - contents: + source: 
data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaApzZXQgLXgKaXAgYWRkciBkZWxldGUgY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbS8zMiBkZXYgbG8KaXAgcm91dGUgZGVsIGNsdXN0ZXIuaW50ZXJuYWwuZXhhbXBsZS5jb20vMzIgZGV2IGxvIHNjb3BlIGxpbmsgc3JjIGNsdXN0ZXIuaW50ZXJuYWwuZXhhbXBsZS5jb20K + mode: 493 + overwrite: true + path: /usr/local/bin/teardown-apiserver-ip.sh + - contents: + source: data:text/plain;charset=utf-8;base64,Z2xvYmFsCiAgbWF4Y29ubiA3MDAwCiAgbG9nIHN0ZG91dCBsb2NhbDAKICBsb2cgc3Rkb3V0IGxvY2FsMSBub3RpY2UKCmRlZmF1bHRzCiAgbW9kZSB0Y3AKICB0aW1lb3V0IGNsaWVudCAxMG0KICB0aW1lb3V0IHNlcnZlciAxMG0KICB0aW1lb3V0IGNvbm5lY3QgMTBzCiAgdGltZW91dCBjbGllbnQtZmluIDVzCiAgdGltZW91dCBzZXJ2ZXItZmluIDVzCiAgdGltZW91dCBxdWV1ZSA1cwogIHJldHJpZXMgMwoKZnJvbnRlbmQgbG9jYWxfYXBpc2VydmVyCiAgYmluZCBjbHVzdGVyLmludGVybmFsLmV4YW1wbGUuY29tOjg0NDMKICBsb2cgZ2xvYmFsCiAgbW9kZSB0Y3AKICBvcHRpb24gdGNwbG9nCiAgZGVmYXVsdF9iYWNrZW5kIHJlbW90ZV9hcGlzZXJ2ZXIKCmJhY2tlbmQgcmVtb3RlX2FwaXNlcnZlcgogIG1vZGUgdGNwCiAgbG9nIGdsb2JhbAogIG9wdGlvbiBodHRwY2hrIEdFVCAvdmVyc2lvbgogIG9wdGlvbiBsb2ctaGVhbHRoLWNoZWNrcwogIGRlZmF1bHQtc2VydmVyIGludGVyIDEwcyBmYWxsIDMgcmlzZSAzCiAgc2VydmVyIGNvbnRyb2xwbGFuZSBjbHVzdGVyLmV4YW1wbGUuY29tOjQ0Mwo= + mode: 420 + overwrite: true + path: /etc/kubernetes/apiserver-proxy-config/haproxy.cfg + - contents: + source: 
data:text/plain;charset=utf-8;base64,YXBpVmVyc2lvbjogdjEKa2luZDogUG9kCm1ldGFkYXRhOgogIGNyZWF0aW9uVGltZXN0YW1wOiBudWxsCiAgbGFiZWxzOgogICAgazhzLWFwcDoga3ViZS1hcGlzZXJ2ZXItcHJveHkKICBuYW1lOiBrdWJlLWFwaXNlcnZlci1wcm94eQogIG5hbWVzcGFjZToga3ViZS1zeXN0ZW0Kc3BlYzoKICBjb250YWluZXJzOgogIC0gY29tbWFuZDoKICAgIC0gaGFwcm94eQogICAgLSAtZgogICAgLSAvdXNyL2xvY2FsL2V0Yy9oYXByb3h5CiAgICBpbWFnZTogaGEtcHJveHktaW1hZ2U6bGF0ZXN0CiAgICBsaXZlbmVzc1Byb2JlOgogICAgICBmYWlsdXJlVGhyZXNob2xkOiAzCiAgICAgIGh0dHBHZXQ6CiAgICAgICAgaG9zdDogY2x1c3Rlci5pbnRlcm5hbC5leGFtcGxlLmNvbQogICAgICAgIHBhdGg6IC92ZXJzaW9uCiAgICAgICAgcG9ydDogODQ0MwogICAgICAgIHNjaGVtZTogSFRUUFMKICAgICAgaW5pdGlhbERlbGF5U2Vjb25kczogMTIwCiAgICAgIHBlcmlvZFNlY29uZHM6IDEyMAogICAgICBzdWNjZXNzVGhyZXNob2xkOiAxCiAgICBuYW1lOiBoYXByb3h5CiAgICBwb3J0czoKICAgIC0gY29udGFpbmVyUG9ydDogODQ0MwogICAgICBob3N0UG9ydDogODQ0MwogICAgICBuYW1lOiBhcGlzZXJ2ZXIKICAgICAgcHJvdG9jb2w6IFRDUAogICAgcmVzb3VyY2VzOgogICAgICByZXF1ZXN0czoKICAgICAgICBjcHU6IDEzbQogICAgICAgIG1lbW9yeTogMTZNaQogICAgc2VjdXJpdHlDb250ZXh0OgogICAgICBydW5Bc1VzZXI6IDEwMDEKICAgIHZvbHVtZU1vdW50czoKICAgIC0gbW91bnRQYXRoOiAvdXNyL2xvY2FsL2V0Yy9oYXByb3h5CiAgICAgIG5hbWU6IGNvbmZpZwogIGhvc3ROZXR3b3JrOiB0cnVlCiAgcHJpb3JpdHlDbGFzc05hbWU6IHN5c3RlbS1ub2RlLWNyaXRpY2FsCiAgdm9sdW1lczoKICAtIGhvc3RQYXRoOgogICAgICBwYXRoOiAvZXRjL2t1YmVybmV0ZXMvYXBpc2VydmVyLXByb3h5LWNvbmZpZwogICAgbmFtZTogY29uZmlnCnN0YXR1czoge30K + mode: 420 + overwrite: true + path: /etc/kubernetes/manifests/kube-apiserver-proxy.yaml +systemd: + units: + - contents: | + [Unit] + Description=Sets up local IP to proxy API server requests + Wants=network-online.target + After=network-online.target + + [Service] + Type=oneshot + ExecStart=/usr/local/bin/setup-apiserver-ip.sh + ExecStop=/usr/local/bin/teardown-apiserver-ip.sh + RemainAfterExit=yes + + [Install] + WantedBy=multi-user.target + enabled: true + name: apiserver-ip.service diff --git 
a/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestReconcileHAProxyIgnitionConfig_private_cluster_uses_.local_address_and_LB_kas.yaml b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestReconcileHAProxyIgnitionConfig_private_cluster_uses_.local_address_and_LB_kas.yaml new file mode 100644 index 0000000000..b5c5981e99 --- /dev/null +++ b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestReconcileHAProxyIgnitionConfig_private_cluster_uses_.local_address_and_LB_kas.yaml @@ -0,0 +1,29 @@ +global + maxconn 7000 + log stdout local0 + log stdout local1 notice + +defaults + mode tcp + timeout client 10m + timeout server 10m + timeout connect 10s + timeout client-fin 5s + timeout server-fin 5s + timeout queue 5s + retries 3 + +frontend local_apiserver + bind 172.20.0.1:6443 + log global + mode tcp + option tcplog + default_backend remote_apiserver + +backend remote_apiserver + mode tcp + log global + option httpchk GET /version + option log-health-checks + default-server inter 10s fall 3 rise 3 + server controlplane api.hc.hypershift.local:6443 diff --git a/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestReconcileHAProxyIgnitionConfig_public_and_private_cluster_uses_.local_address_and_LB_kas.yaml b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestReconcileHAProxyIgnitionConfig_public_and_private_cluster_uses_.local_address_and_LB_kas.yaml new file mode 100644 index 0000000000..b5c5981e99 --- /dev/null +++ b/hypershift-operator/controllers/nodepool/testdata/zz_fixture_TestReconcileHAProxyIgnitionConfig_public_and_private_cluster_uses_.local_address_and_LB_kas.yaml @@ -0,0 +1,29 @@ +global + maxconn 7000 + log stdout local0 + log stdout local1 notice + +defaults + mode tcp + timeout client 10m + timeout server 10m + timeout connect 10s + timeout client-fin 5s + timeout server-fin 5s + timeout queue 5s + retries 3 + +frontend local_apiserver + bind 172.20.0.1:6443 + log global + mode tcp + option 
tcplog + default_backend remote_apiserver + +backend remote_apiserver + mode tcp + log global + option httpchk GET /version + option log-health-checks + default-server inter 10s fall 3 rise 3 + server controlplane api.hc.hypershift.local:6443 diff --git a/hypershift-operator/controllers/scheduler/dedicated_request_serving_nodes.go b/hypershift-operator/controllers/scheduler/dedicated_request_serving_nodes.go index 90dc86ca07..271dff3fde 100644 --- a/hypershift-operator/controllers/scheduler/dedicated_request_serving_nodes.go +++ b/hypershift-operator/controllers/scheduler/dedicated_request_serving_nodes.go @@ -7,11 +7,13 @@ import ( hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/support/upsert" + "github.com/openshift/hypershift/support/util" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -94,7 +96,7 @@ func (r *DedicatedServingComponentScheduler) SetupWithManager(mgr ctrl.Manager, r.createOrUpdate = createOrUpdateProvider.CreateOrUpdate builder := ctrl.NewControllerManagedBy(mgr). - For(&hyperv1.HostedCluster{}). + For(&hyperv1.HostedCluster{}, builder.WithPredicates(util.PredicatesForHostedClusterAnnotationScoping(mgr.GetClient()))). 
WithOptions(controller.Options{ RateLimiter: workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, 10*time.Second), MaxConcurrentReconciles: 10, diff --git a/hypershift-operator/controllers/supportedversion/reconciler.go b/hypershift-operator/controllers/supportedversion/reconciler.go index a6f85f42fb..e58c5d3954 100644 --- a/hypershift-operator/controllers/supportedversion/reconciler.go +++ b/hypershift-operator/controllers/supportedversion/reconciler.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" + "github.com/openshift/hypershift/pkg/version" corev1 "k8s.io/api/core/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" @@ -22,8 +23,9 @@ import ( ) const ( - configMapKey = "supported-versions" - supportedVersionsLabel = "hypershift.openshift.io/supported-versions" + ConfigMapVersionsKey = "supported-versions" + ConfigMapServerVersionKey = "server-version" + supportedVersionsLabel = "hypershift.openshift.io/supported-versions" ) type Reconciler struct { @@ -56,7 +58,7 @@ func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { return nil } -type supportedVersions struct { +type SupportedVersions struct { Versions []string `json:"versions"` } @@ -71,7 +73,7 @@ func (r *Reconciler) ensureSupportedVersionConfigMap(ctx context.Context) error } cm.Labels[supportedVersionsLabel] = "true" if _, err := r.CreateOrUpdate(ctx, r, cm, func() error { - content := &supportedVersions{ + content := &SupportedVersions{ Versions: supportedversion.Supported(), } contentBytes, err := json.Marshal(content) @@ -81,7 +83,8 @@ func (r *Reconciler) ensureSupportedVersionConfigMap(ctx context.Context) error if cm.Data == nil { cm.Data = map[string]string{} } - cm.Data[configMapKey] = string(contentBytes) + cm.Data[ConfigMapVersionsKey] = string(contentBytes) + cm.Data[ConfigMapServerVersionKey] = version.GetRevision() return nil }); err != nil { return fmt.Errorf("failed to update supported version configmap: %w", err) diff --git 
a/hypershift-operator/controllers/supportedversion/reconciler_test.go b/hypershift-operator/controllers/supportedversion/reconciler_test.go index f53df2b12d..9bdd1d0d7a 100644 --- a/hypershift-operator/controllers/supportedversion/reconciler_test.go +++ b/hypershift-operator/controllers/supportedversion/reconciler_test.go @@ -6,7 +6,6 @@ import ( "testing" . "github.com/onsi/gomega" - manifests "github.com/openshift/hypershift/hypershift-operator/controllers/manifests/supportedversion" "github.com/openshift/hypershift/support/supportedversion" "github.com/openshift/hypershift/support/upsert" @@ -28,9 +27,9 @@ func TestEnsureSupportedVersionConfigMap(t *testing.T) { cfgMap := manifests.ConfigMap("hypershift") err = c.Get(context.Background(), client.ObjectKeyFromObject(cfgMap), cfgMap) g.Expect(err).To(BeNil()) - g.Expect(cfgMap.Data[configMapKey]).ToNot(BeEmpty()) - data := &supportedVersions{} - err = json.Unmarshal([]byte(cfgMap.Data[configMapKey]), data) + g.Expect(cfgMap.Data[ConfigMapVersionsKey]).ToNot(BeEmpty()) + data := &SupportedVersions{} + err = json.Unmarshal([]byte(cfgMap.Data[ConfigMapVersionsKey]), data) g.Expect(err).To(BeNil()) g.Expect(len(data.Versions)).To(Equal(supportedversion.SupportedPreviousMinorVersions + 1)) } diff --git a/hypershift-operator/controllers/uwmtelemetry/uwm_telemetry.go b/hypershift-operator/controllers/uwmtelemetry/uwm_telemetry.go index 0042371370..14dd283013 100644 --- a/hypershift-operator/controllers/uwmtelemetry/uwm_telemetry.go +++ b/hypershift-operator/controllers/uwmtelemetry/uwm_telemetry.go @@ -98,6 +98,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, r.errorHandler(operatorDeployment, fmt.Errorf("failed to get clusterversion resource: %w", err)) } + telemeterClientSecret := monitoring.TelemeterClientSecret() + if err := r.Get(ctx, client.ObjectKeyFromObject(telemeterClientSecret), telemeterClientSecret); err != nil { + log.Info("user-workload-monitoring 
(UWM) telemetry remote writer is disabled because the 'telemeter-client' secret does not exist.") + return ctrl.Result{}, nil + } + if err := r.reconcileTelemetryRemoteWrite(ctx, string(clusterVersion.Spec.ClusterID)); err != nil { return ctrl.Result{}, r.errorHandler(operatorDeployment, err) } diff --git a/hypershift-operator/init.go b/hypershift-operator/init.go index cedc3110ba..d25a04a86d 100644 --- a/hypershift-operator/init.go +++ b/hypershift-operator/init.go @@ -130,7 +130,9 @@ func getImageRegistryCABundle(ctx context.Context, client crclient.Client) (*byt if configmap.Data != nil { var buf bytes.Buffer for _, crt := range configmap.Data { - buf.WriteString(crt) + // Added a newline character to the end of each certificate to avoid bad concatenation + // of certificates in the buffer using the UI. + buf.WriteString(fmt.Sprintf("%s\n", crt)) } if buf.Len() > 0 { return &buf, nil diff --git a/hypershift-operator/init_test.go b/hypershift-operator/init_test.go index 0354bec6e8..25fa908f26 100644 --- a/hypershift-operator/init_test.go +++ b/hypershift-operator/init_test.go @@ -113,7 +113,37 @@ func TestGetImageRegistryCABundle(t *testing.T) { }, }, clusterImageConfig: nil, - expectedCert: bytes.NewBufferString("test"), + expectedCert: bytes.NewBufferString("test\n"), + expectedError: false, + }, + { + name: "The trusted CA configmap has more than one certificate entry", + objects: []crclient.Object{ + &configapi.Image{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + Spec: configapi.ImageSpec{ + AdditionalTrustedCA: configapi.ConfigMapNameReference{ + Name: "registry-config", + }, + }, + }, + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "registry-config", + Namespace: "openshift-config", + }, + Data: map[string]string{ + "mirror1.registry.com": "test1", + "mirror2.registry.com": "test2", + "mirror3.registry.com": "test3", + "mirror4.registry.com": "test4", + }, + }, + }, + clusterImageConfig: nil, + expectedCert: 
bytes.NewBufferString("test1\ntest2\ntest3\ntest4\n"), expectedError: false, }, } @@ -127,7 +157,12 @@ func TestGetImageRegistryCABundle(t *testing.T) { if tc.expectedError { g.Expect(err).NotTo(BeNil()) } - g.Expect(cert).To(BeEquivalentTo(tc.expectedCert)) + + if tc.expectedCert != nil { + for _, value := range bytes.Split(cert.Bytes(), []byte("\n")) { + g.Expect(value).Should(BeElementOf(bytes.Split(tc.expectedCert.Bytes(), []byte("\n")))) + } + } }) } } diff --git a/hypershift-operator/main.go b/hypershift-operator/main.go index d3806215de..670555b2f7 100644 --- a/hypershift-operator/main.go +++ b/hypershift-operator/main.go @@ -93,21 +93,22 @@ func main() { } type StartOptions struct { - Namespace string - DeploymentName string - PodName string - MetricsAddr string - CertDir string - EnableOCPClusterMonitoring bool - EnableCIDebugOutput bool - ControlPlaneOperatorImage string - RegistryOverrides map[string]string - PrivatePlatform string - OIDCStorageProviderS3BucketName string - OIDCStorageProviderS3Region string - OIDCStorageProviderS3Credentials string - EnableUWMTelemetryRemoteWrite bool - EnableValidatingWebhook bool + Namespace string + DeploymentName string + PodName string + MetricsAddr string + CertDir string + EnableOCPClusterMonitoring bool + EnableCIDebugOutput bool + ControlPlaneOperatorImage string + RegistryOverrides map[string]string + PrivatePlatform string + OIDCStorageProviderS3BucketName string + OIDCStorageProviderS3Region string + OIDCStorageProviderS3Credentials string + EnableUWMTelemetryRemoteWrite bool + EnableValidatingWebhook bool + EnableDedicatedRequestServingIsolation bool } func NewStartCommand() *cobra.Command { @@ -143,6 +144,7 @@ func NewStartCommand() *cobra.Command { cmd.Flags().StringVar(&opts.OIDCStorageProviderS3Credentials, "oidc-storage-provider-s3-credentials", opts.OIDCStorageProviderS3Credentials, "Location of the credentials file for the OIDC bucket. 
Required for AWS guest clusters.") cmd.Flags().BoolVar(&opts.EnableUWMTelemetryRemoteWrite, "enable-uwm-telemetry-remote-write", opts.EnableUWMTelemetryRemoteWrite, "If true, enables a controller that ensures user workload monitoring is enabled and that it is configured to remote write telemetry metrics from control planes") cmd.Flags().BoolVar(&opts.EnableValidatingWebhook, "enable-validating-webhook", false, "Enable webhook for validating hypershift API types") + cmd.Flags().BoolVar(&opts.EnableDedicatedRequestServingIsolation, "enable-dedicated-request-serving-isolation", true, "If true, enables scheduling of request serving components to dedicated nodes") cmd.Run = func(cmd *cobra.Command, args []string) { ctx, cancel := context.WithCancel(ctrl.SetupSignalHandler()) @@ -151,7 +153,7 @@ func NewStartCommand() *cobra.Command { switch hyperv1.PlatformType(opts.PrivatePlatform) { case hyperv1.AWSPlatform, hyperv1.NonePlatform: default: - fmt.Println(fmt.Sprintf("Unsupported private platform: %q", opts.PrivatePlatform)) + fmt.Printf("Unsupported private platform: %q\n", opts.PrivatePlatform) os.Exit(1) } @@ -286,7 +288,7 @@ func run(ctx context.Context, opts *StartOptions, log logr.Logger) error { // Populate registry overrides with any ICSP and IDMS from a OpenShift management cluster var imageRegistryOverrides map[string][]string if mgmtClusterCaps.Has(capabilities.CapabilityICSP) || mgmtClusterCaps.Has(capabilities.CapabilityIDMS) { - imageRegistryOverrides, err = globalconfig.GetAllImageRegistryMirrors(ctx, apiReadingClient, mgmtClusterCaps.Has(capabilities.CapabilityIDMS)) + imageRegistryOverrides, err = globalconfig.GetAllImageRegistryMirrors(ctx, apiReadingClient, mgmtClusterCaps.Has(capabilities.CapabilityIDMS), mgmtClusterCaps.Has(capabilities.CapabilityICSP)) if err != nil { return fmt.Errorf("failed to populate image registry overrides: %w", err) } @@ -319,10 +321,9 @@ func run(ctx context.Context, opts *StartOptions, log logr.Logger) error { Client: 
mgr.GetClient(), ManagementClusterCapabilities: mgmtClusterCaps, HypershiftOperatorImage: operatorImage, - ReleaseProvider: releaseProviderWithOpenShiftImageRegistryOverrides, + OpenShiftImageRegistryOverrides: opts.RegistryOverrides, EnableOCPClusterMonitoring: opts.EnableOCPClusterMonitoring, EnableCIDebugOutput: opts.EnableCIDebugOutput, - ImageMetadataProvider: imageMetaDataProvider, MetricsSet: metricsSet, OperatorNamespace: opts.Namespace, SREConfigHash: sreConfigHash, @@ -374,8 +375,10 @@ func run(ctx context.Context, opts *StartOptions, log logr.Logger) error { ReleaseProvider: releaseProviderWithOpenShiftImageRegistryOverrides, CreateOrUpdateProvider: createOrUpdate, HypershiftOperatorImage: operatorImage, - ImageMetadataProvider: &hyperutil.RegistryClientImageMetadataProvider{}, - KubevirtInfraClients: kvinfra.NewKubevirtInfraClientMap(), + ImageMetadataProvider: &hyperutil.RegistryClientImageMetadataProvider{ + OpenShiftImageRegistryOverrides: imageRegistryOverrides, + }, + KubevirtInfraClients: kvinfra.NewKubevirtInfraClientMap(), }).SetupWithManager(mgr); err != nil { return fmt.Errorf("unable to create controller: %w", err) } @@ -416,7 +419,7 @@ func run(ctx context.Context, opts *StartOptions, log logr.Logger) error { } // If enabled, start controller to ensure UWM stack is enabled and configured - // to remotely write telemetry metrics + // to remotely write telemetry metrics. 
if opts.EnableUWMTelemetryRemoteWrite { if err := (&uwmtelemetry.Reconciler{ Namespace: opts.Namespace, @@ -425,20 +428,27 @@ func run(ctx context.Context, opts *StartOptions, log logr.Logger) error { }).SetupWithManager(mgr); err != nil { return fmt.Errorf("unable to create uwm telemetry controller: %w", err) } + log.Info("UWM telemetry remote write controller enabled") + } else { + log.Info("UWM telemetry remote write controller disabled") } // Start controllers to manage dedicated request serving isolation - nodeReaper := scheduler.DedicatedServingComponentNodeReaper{ - Client: mgr.GetClient(), - } - if err := nodeReaper.SetupWithManager(mgr); err != nil { - return fmt.Errorf("unable to create dedicated serving component node reaper controller: %w", err) - } - hcScheduler := scheduler.DedicatedServingComponentScheduler{ - Client: mgr.GetClient(), - } - if err := hcScheduler.SetupWithManager(mgr, createOrUpdate); err != nil { - return fmt.Errorf("unable to create dedicated serving component scheduler controller: %w", err) + if opts.EnableDedicatedRequestServingIsolation { + nodeReaper := scheduler.DedicatedServingComponentNodeReaper{ + Client: mgr.GetClient(), + } + if err := nodeReaper.SetupWithManager(mgr); err != nil { + return fmt.Errorf("unable to create dedicated serving component node reaper controller: %w", err) + } + hcScheduler := scheduler.DedicatedServingComponentScheduler{ + Client: mgr.GetClient(), + } + if err := hcScheduler.SetupWithManager(mgr, createOrUpdate); err != nil { + return fmt.Errorf("unable to create dedicated serving component scheduler controller: %w", err) + } + } else { + log.Info("Dedicated request serving isolation controllers disabled") } // If it exists, block default ingress controller from admitting HCP private routes diff --git a/ignition-server/cmd/run_local_ignitionprovider.go b/ignition-server/cmd/run_local_ignitionprovider.go index 2d494ad9a4..e4b379672c 100644 --- a/ignition-server/cmd/run_local_ignitionprovider.go +++ 
b/ignition-server/cmd/run_local_ignitionprovider.go @@ -97,7 +97,7 @@ func (o *RunLocalIgnitionProviderOptions) Run(ctx context.Context) error { p := &controllers.LocalIgnitionProvider{ Client: cl, - ReleaseProvider: &releaseinfo.RegistryClientProvider{}, + ReleaseProvider: &releaseinfo.ProviderWithOpenShiftImageRegistryOverridesDecorator{}, CloudProvider: "", Namespace: o.Namespace, WorkDir: o.WorkDir, @@ -106,7 +106,7 @@ func (o *RunLocalIgnitionProviderOptions) Run(ctx context.Context) error { FeatureGateManifest: o.FeatureGateManifest, } - payload, err := p.GetPayload(ctx, o.Image, config.String(), "") + payload, err := p.GetPayload(ctx, o.Image, config.String(), "", "") if err != nil { return err } diff --git a/ignition-server/cmd/start.go b/ignition-server/cmd/start.go index 29812ef133..6bc319f2b5 100644 --- a/ignition-server/cmd/start.go +++ b/ignition-server/cmd/start.go @@ -246,6 +246,13 @@ func run(ctx context.Context, opts Options) error { return } + if err := util.SanitizeIgnitionPayload(value.Payload); err != nil { + log.Printf("Invalid ignition payload: %s", err) + http.Error(w, "Invalid ignition payload", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") w.WriteHeader(http.StatusOK) w.Write(value.Payload) @@ -285,6 +292,7 @@ func run(ctx context.Context, opts Options) error { tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, }, }, + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), } go func() { diff --git a/ignition-server/controllers/local_ignitionprovider.go b/ignition-server/controllers/local_ignitionprovider.go index 57b30b4e84..1824aec852 100644 --- a/ignition-server/controllers/local_ignitionprovider.go +++ b/ignition-server/controllers/local_ignitionprovider.go @@ -1,6 +1,7 @@ package controllers import ( + "bytes" "context" "crypto/x509" "crypto/x509/pkix" @@ -51,7 +52,7 @@ import ( // measurements. 
type LocalIgnitionProvider struct { Client client.Client - ReleaseProvider releaseinfo.Provider + ReleaseProvider releaseinfo.ProviderWithOpenShiftImageRegistryOverrides CloudProvider hyperv1.PlatformType Namespace string @@ -76,7 +77,7 @@ type LocalIgnitionProvider struct { var _ IgnitionProvider = (*LocalIgnitionProvider)(nil) -func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage string, customConfig string, pullSecretHash string) ([]byte, error) { +func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage string, customConfig string, pullSecretHash string, hcConfigurationHash string) ([]byte, error) { p.lock.Lock() defer p.lock.Unlock() @@ -99,7 +100,7 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str } // Verify the pullSecret hash matches the passed-in parameter pullSecretHash to ensure the correct pull secret gets loaded into the payload - if pullSecretHash != "" && util.HashStruct(pullSecret) != pullSecretHash { + if pullSecretHash != "" && util.HashSimple(pullSecret) != pullSecretHash { return nil, fmt.Errorf("pull secret does not match hash") } @@ -125,6 +126,11 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str return nil, fmt.Errorf("failed to get machine-config-server configmap: %w", err) } + // Verify the MCS configmap is up-to-date + if hcConfigurationHash != "" && mcsConfig.Data["configuration-hash"] != hcConfigurationHash { + return nil, fmt.Errorf("machine-config-server configmap is out of date, waiting for update %s != %s", mcsConfig.Data["configuration-hash"], hcConfigurationHash) + } + // Look up the release image metadata imageProvider, err := func() (*imageprovider.ReleaseImageProvider, error) { img, err := p.ReleaseProvider.Lookup(ctx, releaseImage, pullSecret) @@ -185,6 +191,9 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str } // Extract MCS config files into the config directory for name, contents 
:= range mcsConfig.Data { + if name == "configuration-hash" { + continue + } if err := os.WriteFile(filepath.Join(configDir, name), []byte(contents), 0644); err != nil { return nil, fmt.Errorf("failed to write MCS config file %q: %w", name, err) } @@ -193,11 +202,14 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str err = func() error { start := time.Now() - // Replace the release image with the mirrored release image in disconnected environment cases - mirroredReleaseImage, ok := os.LookupEnv("MIRRORED_RELEASE_IMAGE") - if ok { - log.Info("replaced release image with mirrored release image to extract image-references", "releaseImage", releaseImage, "mirroredReleaseImage", mirroredReleaseImage) - releaseImage = mirroredReleaseImage + // Replace the release image with the mirrored release image in disconnected environment cases. + // ProviderWithOpenShiftImageRegistryOverrides Lookup will store the mirrored release image if it exists. + _, err := p.ReleaseProvider.Lookup(ctx, releaseImage, pullSecret) + if err != nil { + return fmt.Errorf("failed to look up release image metadata: %w", err) + } + if p.ReleaseProvider.GetMirroredReleaseImage() != "" { + releaseImage = p.ReleaseProvider.GetMirroredReleaseImage() } if err := registryclient.ExtractImageFilesToDir(ctx, releaseImage, pullSecret, "release-manifests/image-references", configDir); err != nil { @@ -238,47 +250,50 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str return nil, fmt.Errorf("failed to extract templates from image: %w", err) } + payloadVersion, err := semver.Parse(imageProvider.Version()) + if err != nil { + return nil, fmt.Errorf("failed to parse payload version: %w", err) + } + + // set the component to the correct binary name and file path based on the payload version + clusterConfigComponent := "cluster-config-api" + clusterConfigComponentShort := "cca" + clusterConfigFile := "usr/bin/render" + + if payloadVersion.Major == 4 && 
payloadVersion.Minor < 15 { + clusterConfigComponent = "cluster-config-operator" + clusterConfigComponentShort = "cco" + clusterConfigFile = "usr/bin/cluster-config-operator" + } + // Extract binaries from the MCO image into the bin directory + err = p.extractMCOBinaries(ctx, "/usr/lib/os-release", mcoImage, pullSecret, binDir) + if err != nil { + return nil, fmt.Errorf("failed to download MCO binaries: %w", err) + } + err = func() error { start := time.Now() - binaries := []string{"machine-config-operator", "machine-config-controller", "machine-config-server"} - for _, name := range binaries { - file, err := os.Create(filepath.Join(binDir, name)) - if err != nil { - return fmt.Errorf("failed to create file: %w", err) - } - if err := file.Chmod(0777); err != nil { - return fmt.Errorf("failed to chmod file: %w", err) - } - if err := p.ImageFileCache.extractImageFile(ctx, mcoImage, pullSecret, filepath.Join("usr/bin/", name), file); err != nil { - return fmt.Errorf("failed to extract image file: %w", err) - } - if err := file.Close(); err != nil { - return fmt.Errorf("failed to close file: %w", err) - } - } - - component = "cluster-config-operator" - clusterConfigOperatorImage, ok := imageProvider.ImageExist(component) + clusterConfigImage, ok := imageProvider.ImageExist(clusterConfigComponent) if !ok { - return fmt.Errorf("release image does not contain cluster-config-operator (images: %v)", imageProvider.ComponentImages()) + return fmt.Errorf("release image does not contain $%s (images: %v)", clusterConfigComponent, imageProvider.ComponentImages()) } - clusterConfigOperatorImage, err = registryclient.GetCorrectArchImage(ctx, component, clusterConfigOperatorImage, pullSecret) + clusterConfigImage, err = registryclient.GetCorrectArchImage(ctx, clusterConfigComponent, clusterConfigImage, pullSecret) if err != nil { return err } - log.Info("discovered cluster config operator image", "image", clusterConfigOperatorImage) + log.Info(fmt.Sprintf("discovered image %s image 
%v", clusterConfigComponent, clusterConfigImage)) - file, err := os.Create(filepath.Join(binDir, component)) + file, err := os.Create(filepath.Join(binDir, clusterConfigComponent)) if err != nil { return fmt.Errorf("failed to create file: %w", err) } if err := file.Chmod(0777); err != nil { return fmt.Errorf("failed to chmod file: %w", err) } - if err := p.ImageFileCache.extractImageFile(ctx, clusterConfigOperatorImage, pullSecret, filepath.Join("usr/bin/", component), file); err != nil { + if err := p.ImageFileCache.extractImageFile(ctx, clusterConfigImage, pullSecret, clusterConfigFile, file); err != nil { return fmt.Errorf("failed to extract image file: %w", err) } if err := file.Close(); err != nil { @@ -292,11 +307,6 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str return nil, fmt.Errorf("failed to download binaries: %w", err) } - payloadVersion, err := semver.Parse(imageProvider.Version()) - if err != nil { - return nil, fmt.Errorf("failed to parse payload version: %w", err) - } - featureGateBytes, err := os.ReadFile(p.FeatureGateManifest) if err != nil { return nil, fmt.Errorf("failed to read feature gate: %w", err) @@ -307,19 +317,19 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str args := []string{ "-c", - invokeFeatureGateRenderScript(filepath.Join(binDir, "cluster-config-operator"), filepath.Join(workDir, "cco"), mccBaseDir, payloadVersion, string(featureGateBytes)), + invokeFeatureGateRenderScript(filepath.Join(binDir, clusterConfigComponent), filepath.Join(workDir, clusterConfigComponentShort), mccBaseDir, payloadVersion, string(featureGateBytes)), } cmd := exec.CommandContext(ctx, "/bin/bash", args...) 
out, err := cmd.CombinedOutput() - log.Info("cluster-config-operator process completed", "time", time.Since(start).Round(time.Second).String(), "output", string(out)) if err != nil { - return fmt.Errorf("cluster-config-operator process failed: %s: %w", string(out), err) + return fmt.Errorf("%s process failed: %s: %w", clusterConfigComponent, string(out), err) } + log.Info(fmt.Sprintf("%s process completed", clusterConfigComponent), "time", time.Since(start).Round(time.Second).String(), "output", string(out)) return nil }() if err != nil { - return nil, fmt.Errorf("failed to execute cluster-config-operator: %w", err) + return nil, fmt.Errorf("failed to execute %s: %w", clusterConfigComponent, err) } // First, run the MCO using templates and image refs as input. This generates @@ -357,6 +367,7 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str fmt.Sprintf("--pull-secret=%s/pull-secret.yaml", configDir), fmt.Sprintf("--dest-dir=%s", destDir), fmt.Sprintf("--additional-trust-bundle-config-file=%s/user-ca-bundle-config.yaml", configDir), + fmt.Sprintf("--release-image=%s", releaseImage), } // Depending on the version, we need different args. 
@@ -457,6 +468,11 @@ func (p *LocalIgnitionProvider) GetPayload(ctx context.Context, releaseImage str err = func() error { start := time.Now() + // copy the image config out of the configDir and into the mccBaseDir + if err := copyFile(filepath.Join(configDir, "image-config.yaml"), filepath.Join(mccBaseDir, "image-config.yaml")); err != nil { + return fmt.Errorf("failed to copy image-config.yaml: %w", err) + } + args := []string{ "bootstrap", fmt.Sprintf("--manifest-dir=%s", mccBaseDir), @@ -659,6 +675,26 @@ cat <%[2]s/manifests/99_feature-gate.yaml %[5]s EOF +%[1]s \ + --asset-output-dir %[2]s/output \ + --image-manifests=input \ + --rendered-manifest-dir=%[2]s/manifests \ + --cluster-profile=ibm-cloud-managed \ + --payload-version=%[4]s +cp %[2]s/manifests/99_feature-gate.yaml %[3]s/99_feature-gate.yaml +` + + // Depending on the version, we need different args. + if payloadVersion.Major == 4 && payloadVersion.Minor < 15 { + script = `#!/bin/bash +set -e +mkdir -p %[2]s +cd %[2]s +mkdir -p input output manifests +touch %[2]s/manifests/99_feature-gate.yaml +cat <%[2]s/manifests/99_feature-gate.yaml +%[5]s +EOF %[1]s render \ --config-output-file config \ --asset-input-dir %[2]s/input \ @@ -667,21 +703,19 @@ EOF --payload-version=%[4]s cp %[2]s/manifests/99_feature-gate.yaml %[3]s/99_feature-gate.yaml ` + } // Depending on the version, we need different args. 
- if payloadVersion.Minor < 14 { + if payloadVersion.Major == 4 && payloadVersion.Minor < 14 { script = `#!/bin/bash set -e mkdir -p %[2]s - cd %[2]s mkdir -p input output manifests - touch %[2]s/manifests/99_feature-gate.yaml cat <%[2]s/manifests/99_feature-gate.yaml %[5]s EOF - %[1]s render \ --config-output-file config \ --asset-input-dir %[2]s/input \ @@ -692,3 +726,75 @@ cp %[2]s/manifests/99_feature-gate.yaml %[3]s/99_feature-gate.yaml return fmt.Sprintf(script, binary, workDir, outputDir, payloadVersion, featureGateYAML) } + +func (p *LocalIgnitionProvider) extractMCOBinaries(ctx context.Context, cpoOSReleaseFile string, mcoImage string, pullSecret []byte, binDir string) error { + start := time.Now() + binaries := []string{"machine-config-operator", "machine-config-controller", "machine-config-server"} + suffix := "" + + mcoOSReleaseBuf := &bytes.Buffer{} + if err := p.ImageFileCache.extractImageFile(ctx, mcoImage, pullSecret, "usr/lib/os-release", mcoOSReleaseBuf); err != nil { + return fmt.Errorf("failed to extract image os-release file: %w", err) + } + mcoOSRelease := mcoOSReleaseBuf.String() + + // read /etc/os-release file from disk to cpoOSRelease + cpoOSRelease, err := os.ReadFile(cpoOSReleaseFile) + if err != nil { + return fmt.Errorf("failed to read cpo os-release file: %w", err) + } + + // extract RHEL major version from both os-release files + extractMajorVersion := func(osRelease string) (string, error) { + for _, line := range strings.Split(osRelease, "\n") { + if strings.HasPrefix(line, "VERSION_ID=") { + return strings.Split(strings.TrimSuffix(strings.TrimPrefix(line, "VERSION_ID=\""), "\""), ".")[0], nil + } + } + return "", fmt.Errorf("failed to find VERSION_ID in os-release file") + } + mcoRHELMajorVersion, err := extractMajorVersion(mcoOSRelease) + if err != nil { + return fmt.Errorf("failed to extract major version from MCO os-release: %w", err) + } + cpoRHELMajorVersion, err := extractMajorVersion(string(cpoOSRelease)) + if err != nil { 
+ return fmt.Errorf("failed to extract major version from CPO os-release: %w", err) + } + log.Info("read os-release", "mcoRHELMajorVersion", mcoRHELMajorVersion, "cpoRHELMajorVersion", cpoRHELMajorVersion) + + if mcoRHELMajorVersion == "8" && cpoRHELMajorVersion == "9" { + // NodePool MCO RHEL major version is older than the CPO, need to add suffix to the binaries + suffix = ".rhel9" + } + + for _, name := range binaries { + srcPath := filepath.Join("usr/bin/", name+suffix) + destPath := filepath.Join(binDir, name) + file, err := os.Create(destPath) + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + if err := file.Chmod(0777); err != nil { + return fmt.Errorf("failed to chmod file: %w", err) + } + log.Info("copying file", "src", srcPath, "dest", destPath) + if err := p.ImageFileCache.extractImageFile(ctx, mcoImage, pullSecret, srcPath, file); err != nil { + if suffix == "" { + return fmt.Errorf("failed to extract image file: %w", err) + } + // The MCO image in the NodePool release image does not contain the suffixed binary, try to extract the unsuffixed binary + srcPath = filepath.Join("usr/bin/", name) + log.Info("suffixed binary not found, copying file", "src", srcPath, "dest", destPath) + if err := p.ImageFileCache.extractImageFile(ctx, mcoImage, pullSecret, filepath.Join("usr/bin/", name), file); err != nil { + return fmt.Errorf("failed to extract image file: %w", err) + } + } + if err := file.Close(); err != nil { + return fmt.Errorf("failed to close file: %w", err) + } + } + + log.Info("downloaded binaries", "time", time.Since(start).Round(time.Second).String()) + return nil +} diff --git a/ignition-server/controllers/local_ignitionprovider_test.go b/ignition-server/controllers/local_ignitionprovider_test.go new file mode 100644 index 0000000000..dbd42f60e5 --- /dev/null +++ b/ignition-server/controllers/local_ignitionprovider_test.go @@ -0,0 +1,126 @@ +package controllers + +import ( + "context" + "fmt" + "io" + "os" + 
"path/filepath" + "testing" + + . "github.com/onsi/gomega" +) + +func TestExtractMCOBinaries(t *testing.T) { + testCases := []struct { + name string + mcoOSReleaseVersion string + cpoOSReleaseVersion string + expectedBinaryVersion string + cacheFunc regClient + }{ + { + name: "When both MCO and CPO are on RHEL 8, it should extract the RHEL 8 binaries with no prefix", + mcoOSReleaseVersion: "8.1", + cpoOSReleaseVersion: "8.2", + expectedBinaryVersion: "rhel8", + }, + { + name: "When both MCO is in RHEL 8 and CPO on RHEL 9, it should extract the RHEL 9 binaries with the .rhel9 prefix", + mcoOSReleaseVersion: "8.1", + cpoOSReleaseVersion: "9.1", + expectedBinaryVersion: "rhel9", + }, + { + name: "When MCO is in too old version and CPO on RHEL 9, and the RHEL 9 binaries do not exist it should extract the RHEL 8 binaries with no prefix", + mcoOSReleaseVersion: "8.0", + cpoOSReleaseVersion: "9.1", + expectedBinaryVersion: "rhel8", + cacheFunc: func(ctx context.Context, imageRef string, pullSecret []byte, file string, out io.Writer) error { + switch file { + case "usr/lib/os-release": + _, err := out.Write([]byte(fmt.Sprintf("VERSION_ID=\"%s\"\n", "8.0"))) + return err + case "usr/bin/machine-config-operator", "usr/bin/machine-config-controller", "usr/bin/machine-config-server": + _, err := out.Write([]byte("rhel8")) + return err + case "usr/bin/machine-config-operator.rhel9", "usr/bin/machine-config-controller.rhel9", "usr/bin/machine-config-server.rhel9": + return fmt.Errorf("file not found: %s", file) + default: + return fmt.Errorf("unexpected file: %s", file) + } + }, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + tempDir, err := os.MkdirTemp("", "testExtractBinaries-*") + if err != nil { + t.Fatalf("Failed to create temporary directory: %v", err) + } + defer os.RemoveAll(tempDir) + + // Set up the necessary variables for testing. 
+ ctx := context.Background() + mcoImage := "fake" + pullSecret := []byte{} + binDir := filepath.Join(tempDir, "bin") + os.Mkdir(binDir, 0755) + + // Create a fake file cache that returns the expected binaries. + imageFileCache := &imageFileCache{ + cacheMap: make(map[cacheKey]cacheValue), + cacheDir: tempDir, + } + imageFileCache.regClient = func(ctx context.Context, imageRef string, pullSecret []byte, file string, out io.Writer) error { + switch file { + case "usr/lib/os-release": + _, err := out.Write([]byte(fmt.Sprintf("VERSION_ID=\"%s\"\n", tc.mcoOSReleaseVersion))) + return err + case "usr/bin/machine-config-operator", "usr/bin/machine-config-controller", "usr/bin/machine-config-server": + _, err := out.Write([]byte("rhel8")) + return err + case "usr/bin/machine-config-operator.rhel9", "usr/bin/machine-config-controller.rhel9", "usr/bin/machine-config-server.rhel9": + _, err := out.Write([]byte("rhel9")) + return err + default: + return fmt.Errorf("unexpected file: %s", file) + } + } + + // If the test case has a custom cache function, use it. + // This is useful to simulate the case where the ocp release for the NodePool is too old that it doesn't have the RHEL binaries. + if tc.cacheFunc != nil { + imageFileCache.regClient = tc.cacheFunc + } + + // Create a fake cpo os-release file + cpoOSRelease := fmt.Sprintf("VERSION_ID=\"%s\"\n", tc.cpoOSReleaseVersion) + cpoOSReleaseFilePath := filepath.Join(tempDir, "usr/lib/os-release") + err = os.MkdirAll(filepath.Dir(cpoOSReleaseFilePath), 0755) + g.Expect(err).NotTo(HaveOccurred()) + err = os.WriteFile(cpoOSReleaseFilePath, []byte(cpoOSRelease), 0644) + g.Expect(err).NotTo(HaveOccurred()) + + // Create a LocalIgnitionProvider instance for testing. + provider := &LocalIgnitionProvider{ + ImageFileCache: imageFileCache, + WorkDir: tempDir, + } + + // Call the extractMCOBinaries. 
+ err = provider.extractMCOBinaries(ctx, cpoOSReleaseFilePath, mcoImage, pullSecret, binDir) + g.Expect(err).NotTo(HaveOccurred()) + + // Verify the extracted binaries match the expected version. + for _, name := range []string{"machine-config-operator", "machine-config-controller", "machine-config-server"} { + filePath := filepath.Join(binDir, name) + fileContent, err := os.ReadFile(filePath) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(string(fileContent)).To(Equal(tc.expectedBinaryVersion)) + } + }) + } +} diff --git a/ignition-server/controllers/machineconfigserver_ignitionprovider.go b/ignition-server/controllers/machineconfigserver_ignitionprovider.go index 70f1cf6eb0..36060d923b 100644 --- a/ignition-server/controllers/machineconfigserver_ignitionprovider.go +++ b/ignition-server/controllers/machineconfigserver_ignitionprovider.go @@ -44,7 +44,7 @@ type MCSIgnitionProvider struct { Namespace string } -func (p *MCSIgnitionProvider) GetPayload(ctx context.Context, releaseImage string, config string, pullSecretHash string) (payload []byte, err error) { +func (p *MCSIgnitionProvider) GetPayload(ctx context.Context, releaseImage string, config string, pullSecretHash string, _ string) (payload []byte, err error) { pullSecret := &corev1.Secret{} if err := p.Client.Get(ctx, client.ObjectKey{Namespace: p.Namespace, Name: pullSecretName}, pullSecret); err != nil { return nil, fmt.Errorf("failed to get pull secret: %w", err) diff --git a/ignition-server/controllers/tokensecret_controller.go b/ignition-server/controllers/tokensecret_controller.go index 08acc73305..3da2774c7b 100644 --- a/ignition-server/controllers/tokensecret_controller.go +++ b/ignition-server/controllers/tokensecret_controller.go @@ -23,18 +23,19 @@ import ( ) const ( - TokenSecretReleaseKey = "release" - TokenSecretConfigKey = "config" - TokenSecretTokenKey = "token" - TokenSecretOldTokenKey = "old_token" - TokenSecretPayloadKey = "payload" - TokenSecretMessageKey = "message" - 
TokenSecretPullSecretHashKey = "pull-secret-hash" - InvalidConfigReason = "InvalidConfig" - TokenSecretReasonKey = "reason" - TokenSecretAnnotation = "hypershift.openshift.io/ignition-config" - TokenSecretNodePoolUpgradeType = "hypershift.openshift.io/node-pool-upgrade-type" - TokenSecretTokenGenerationTime = "hypershift.openshift.io/last-token-generation-time" + TokenSecretReleaseKey = "release" + TokenSecretConfigKey = "config" + TokenSecretTokenKey = "token" + TokenSecretOldTokenKey = "old_token" + TokenSecretPayloadKey = "payload" + TokenSecretMessageKey = "message" + TokenSecretPullSecretHashKey = "pull-secret-hash" + TokenSecretHCConfigurationHashKey = "hc-configuration-hash" + InvalidConfigReason = "InvalidConfig" + TokenSecretReasonKey = "reason" + TokenSecretAnnotation = "hypershift.openshift.io/ignition-config" + TokenSecretNodePoolUpgradeType = "hypershift.openshift.io/node-pool-upgrade-type" + TokenSecretTokenGenerationTime = "hypershift.openshift.io/last-token-generation-time" // Set the ttl 1h above the reconcile resync period so every existing // token Secret has the chance to rotate their token ID during a reconciliation cycle // while the expired ones get eventually garbageCollected. @@ -78,7 +79,7 @@ func NewPayloadStore() *ExpiringCache { type IgnitionProvider interface { // GetPayload returns the ignition payload content for // the provided release image and a config string containing 0..N MachineConfig yaml definitions. 
- GetPayload(ctx context.Context, payloadImage, config string, pullSecretHash string) ([]byte, error) + GetPayload(ctx context.Context, payloadImage, config string, pullSecretHash string, hcConfigurationHash string) ([]byte, error) } // TokenSecretReconciler watches token Secrets @@ -258,9 +259,10 @@ func (r *TokenSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) PayloadCacheMissTotal.Inc() pullSecretHash := string(tokenSecret.Data[TokenSecretPullSecretHashKey]) + hcConfigurationHash := string(tokenSecret.Data[TokenSecretHCConfigurationHashKey]) payload, err := func() ([]byte, error) { start := time.Now() - payload, err := r.IgnitionProvider.GetPayload(ctx, releaseImage, config.String(), pullSecretHash) + payload, err := r.IgnitionProvider.GetPayload(ctx, releaseImage, config.String(), pullSecretHash, hcConfigurationHash) if err != nil { return nil, fmt.Errorf("error getting ignition payload: %v", err) } diff --git a/ignition-server/controllers/tokensecret_controller_test.go b/ignition-server/controllers/tokensecret_controller_test.go index 310d3c2b2f..c39c882e16 100644 --- a/ignition-server/controllers/tokensecret_controller_test.go +++ b/ignition-server/controllers/tokensecret_controller_test.go @@ -25,7 +25,7 @@ var ( type fakeIgnitionProvider struct{} -func (p *fakeIgnitionProvider) GetPayload(ctx context.Context, releaseImage string, config string, pullSecretHash string) (payload []byte, err error) { +func (p *fakeIgnitionProvider) GetPayload(ctx context.Context, releaseImage string, config string, pullSecretHash string, hcConfigurationHash string) (payload []byte, err error) { return []byte(fakePayload), nil } diff --git a/konnectivity-https-proxy/cmd.go b/konnectivity-https-proxy/cmd.go new file mode 100644 index 0000000000..3c6f505413 --- /dev/null +++ b/konnectivity-https-proxy/cmd.go @@ -0,0 +1,193 @@ +package konnectivityhttpsproxy + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + "os" + + "github.com/elazarl/goproxy" 
+ "github.com/go-logr/logr" + "github.com/openshift/hypershift/pkg/version" + "github.com/openshift/hypershift/support/konnectivityproxy" + "github.com/spf13/cobra" + "go.uber.org/zap/zapcore" + "golang.org/x/net/http/httpproxy" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func NewStartCommand() *cobra.Command { + zLogger := zap.New( + zap.UseDevMode(true), + zap.JSONEncoder(func(o *zapcore.EncoderConfig) { + o.EncodeTime = zapcore.RFC3339TimeEncoder + }), + ) + log.SetLogger(zLogger) + l := log.Log.WithName("konnectivity-https-proxy") + cmd := &cobra.Command{ + Use: "konnectivity-https-proxy", + Short: "Runs the konnectivity https proxy server.", + Long: ` Runs the konnectivity https proxy server. + This proxy accepts request and tunnels them through the designated Konnectivity Server.`, + } + + opts := konnectivityproxy.Options{ + ResolveBeforeDial: true, + ResolveFromGuestClusterDNS: true, + } + + var servingPort uint32 + var httpProxyURL string + var httpsProxyURL string + var noProxy string + + cmd.Flags().StringVar(&opts.KonnectivityHost, "konnectivity-hostname", "konnectivity-server-local", "The hostname of the konnectivity service.") + cmd.Flags().Uint32Var(&opts.KonnectivityPort, "konnectivity-port", 8090, "The konnectivity port that https proxy should connect to.") + cmd.Flags().Uint32Var(&servingPort, "serving-port", 8090, "The port that https proxy should serve on.") + + cmd.Flags().StringVar(&opts.CAFile, "ca-cert-path", "/etc/konnectivity/proxy-ca/ca.crt", "The path to the konnectivity client's ca-cert.") + cmd.Flags().StringVar(&opts.ClientCertFile, "tls-cert-path", "/etc/konnectivity/proxy-client/tls.crt", "The path to the konnectivity client's tls certificate.") + cmd.Flags().StringVar(&opts.ClientKeyFile, "tls-key-path", 
"/etc/konnectivity/proxy-client/tls.key", "The path to the konnectivity client's private key.") + + cmd.Flags().StringVar(&httpProxyURL, "http-proxy", "", "HTTP proxy to use on hosted cluster requests") + cmd.Flags().StringVar(&httpsProxyURL, "https-proxy", "", "HTTPS proxy to use on hosted cluster requests") + cmd.Flags().StringVar(&noProxy, "no-proxy", "", "URLs that should not use the provided http-proxy and https-proxy") + + cmd.Flags().BoolVar(&opts.ConnectDirectlyToCloudAPIs, "connect-directly-to-cloud-apis", false, "If true, bypass konnectivity to connect to cloud APIs while still honoring management proxy config") + + cmd.Run = func(cmd *cobra.Command, args []string) { + l.Info("Starting proxy", "version", version.String()) + c, err := client.New(ctrl.GetConfigOrDie(), client.Options{}) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: failed to get kubernetes client: %v", err) + os.Exit(1) + } + opts.Client = c + opts.Log = l + + var proxyTLS *tls.Config + var proxyURLHostPort *string + proxyHostNames := sets.New[string]() + + if len(httpsProxyURL) > 0 { + u, err := url.Parse(httpsProxyURL) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: failed to parse HTTPS proxy URL: %v", err) + os.Exit(1) + } + hostName, _, err := net.SplitHostPort(u.Host) + if err == nil { + proxyHostNames.Insert(hostName) + } + l.V(4).Info("Data plane HTTPS proxy is set", "hostname", hostName, "url", u.String()) + proxyURLHostPort = ptr.To(u.Host) + } + if len(httpProxyURL) > 0 { + u, err := url.Parse(httpProxyURL) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: failed to parse HTTP proxy URL: %v", err) + os.Exit(1) + } + hostName, _, err := net.SplitHostPort(u.Host) + if err == nil { + proxyHostNames.Insert(hostName) + } + l.V(4).Info("Data plane HTTP proxy is set", "hostname", hostName, "url", u.String()) + if proxyURLHostPort == nil { + proxyURLHostPort = ptr.To(u.Host) + } + } + l.V(4).Info("Excluding API hosts from isCloudAPI check", "hosts", sets.List(proxyHostNames)) + 
opts.ExcludeCloudAPIHosts = sets.List(proxyHostNames) + + konnectivityDialer, err := konnectivityproxy.NewKonnectivityDialer(opts) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: failed to initialize konnectivity dialer: %v", err) + os.Exit(1) + } + + userProxyConfig := &httpproxy.Config{ + HTTPProxy: httpProxyURL, + HTTPSProxy: httpsProxyURL, + NoProxy: noProxy, + } + userProxyFunc := userProxyConfig.ProxyFunc() + + httpProxy := goproxy.NewProxyHttpServer() + httpProxy.Verbose = true + + if proxyURLHostPort != nil { + host, _, err := net.SplitHostPort(*proxyURLHostPort) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: failed to split proxy URL host port (%s): %v", *proxyURLHostPort, err) + } + proxyTLS = &tls.Config{ + MinVersion: tls.VersionTLS12, + ServerName: host, + } + } + httpProxy.Tr = &http.Transport{ + TLSClientConfig: proxyTLS, + Proxy: func(req *http.Request) (*url.URL, error) { + l.V(4).Info("Determining whether request should be proxied", "url", req.URL) + u, err := userProxyFunc(req.URL) + if err != nil { + l.V(4).Error(err, "failed to determine whether request should be proxied") + return nil, err + } + l.V(4).Info("Should proxy", "url", u) + return u, nil + }, + Dial: konnectivityDialer.Dial, + } + if httpsProxyURL != "" { + httpProxy.ConnectDialWithReq = connectDialFunc(l, httpProxy, httpsProxyURL, opts.ConnectDirectlyToCloudAPIs, konnectivityDialer.IsCloudAPI, userProxyFunc) + } else { + httpProxy.ConnectDial = nil + httpProxy.ConnectDialWithReq = nil + } + err = http.ListenAndServe(fmt.Sprintf(":%d", servingPort), httpProxy) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v", err) + os.Exit(1) + } + } + + return cmd +} + +func connectDialFunc(log logr.Logger, httpProxy *goproxy.ProxyHttpServer, proxyURL string, connectDirectlyToCloudAPIs bool, isCloudAPI func(string) bool, userProxyFunc func(*url.URL) (*url.URL, error)) func(req *http.Request, network, addr string) (net.Conn, error) { + defaultDial := 
httpProxy.NewConnectDialToProxy(proxyURL) + return func(req *http.Request, network, addr string) (net.Conn, error) { + log.V(4).Info("Connect dial called", "network", network, "address", addr, "URL", req.URL) + requestURL := *req.URL + // Ensure the request URL scheme is set. This function is only called + // for requests to https endpoints. + requestURL.Scheme = "https" + proxyURL, err := userProxyFunc(&requestURL) + if err != nil { + return nil, err + } + log.V(4).Info("Determined proxy URL", "url", proxyURL) + host, _, err := net.SplitHostPort(requestURL.Host) + if err != nil { + return nil, err + } + // If the URL is a cloud API or it should not be proxied, then + // send it through the dialer directly. + if (connectDirectlyToCloudAPIs && isCloudAPI(host)) || proxyURL == nil { + log.V(4).Info("Host is cloud API or should not use a proxy with it, dialing directly through konnectivity") + return httpProxy.Tr.Dial(network, addr) + } + log.V(4).Info("Using proxy to dial", "proxy", proxyURL) + return defaultDial(network, addr) + } +} diff --git a/konnectivity-socks5-proxy/main.go b/konnectivity-socks5-proxy/main.go index 96fd0225a6..35dead2cf2 100644 --- a/konnectivity-socks5-proxy/main.go +++ b/konnectivity-socks5-proxy/main.go @@ -1,32 +1,18 @@ package konnectivitysocks5proxy import ( - "bufio" - "context" - "crypto/tls" - "errors" "fmt" - "net" - "net/http" - "strings" - "sync" - "time" + "os" "github.com/armon/go-socks5" "github.com/openshift/hypershift/pkg/version" + "github.com/openshift/hypershift/support/konnectivityproxy" "github.com/spf13/cobra" "go.uber.org/zap/zapcore" - "golang.org/x/net/proxy" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - "github.com/go-logr/logr" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/apiserver-network-proxy/pkg/util" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + 
"sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) func NewStartCommand() *cobra.Command { @@ -45,55 +31,42 @@ func NewStartCommand() *cobra.Command { `, } - var proxyHostname string - var proxyPort int - var servingPort int - var caCertPath string - var clientCertPath string - var clientKeyPath string - var connectDirectlyToCloudAPIs bool - var resolveFromGuestClusterDNS bool - var resolveFromManagementClusterDNS bool + opts := konnectivityproxy.Options{} + + var servingPort uint32 - cmd.Flags().StringVar(&proxyHostname, "konnectivity-hostname", "konnectivity-server-local", "The hostname of the konnectivity service.") - cmd.Flags().IntVar(&proxyPort, "konnectivity-port", 8090, "The konnectivity port that socks5 proxy should connect to.") - cmd.Flags().IntVar(&servingPort, "serving-port", 8090, "The port that socks5 proxy should serve on.") - cmd.Flags().BoolVar(&connectDirectlyToCloudAPIs, "connect-directly-to-cloud-apis", false, "If true, traffic destined for AWS or Azure APIs should be sent there directly rather than going through konnectivity. 
If enabled, proxy env vars from the mgmt cluster must be propagated to this container") - cmd.Flags().BoolVar(&resolveFromGuestClusterDNS, "resolve-from-guest-cluster-dns", false, "If DNS resolving should use the guest clusters cluster-dns") - cmd.Flags().BoolVar(&resolveFromManagementClusterDNS, "resolve-from-management-cluster-dns", false, "If guest cluster's dns fails, fallback to the management cluster's dns") + cmd.Flags().StringVar(&opts.KonnectivityHost, "konnectivity-hostname", "konnectivity-server-local", "The hostname of the konnectivity service.") + cmd.Flags().Uint32Var(&opts.KonnectivityPort, "konnectivity-port", 8090, "The konnectivity port that socks5 proxy should connect to.") + cmd.Flags().Uint32Var(&servingPort, "serving-port", 8090, "The port that socks5 proxy should serve on.") + cmd.Flags().BoolVar(&opts.ConnectDirectlyToCloudAPIs, "connect-directly-to-cloud-apis", false, "If true, traffic destined for AWS or Azure APIs should be sent there directly rather than going through konnectivity. If enabled, proxy env vars from the mgmt cluster must be propagated to this container") + cmd.Flags().BoolVar(&opts.ResolveFromGuestClusterDNS, "resolve-from-guest-cluster-dns", false, "If DNS resolving should use the guest clusters cluster-dns") + cmd.Flags().BoolVar(&opts.ResolveFromManagementClusterDNS, "resolve-from-management-cluster-dns", false, "If guest cluster's dns fails, fallback to the management cluster's dns") + cmd.Flags().BoolVar(&opts.DisableResolver, "disable-resolver", false, "If true, DNS resolving is disabled. 
Takes precedence over resolve-from-guest-cluster-dns and resolve-from-management-cluster-dns") - cmd.Flags().StringVar(&caCertPath, "ca-cert-path", "/etc/konnectivity/proxy-ca/ca.crt", "The path to the konnectivity client's ca-cert.") - cmd.Flags().StringVar(&clientCertPath, "tls-cert-path", "/etc/konnectivity/proxy-client/tls.crt", "The path to the konnectivity client's tls certificate.") - cmd.Flags().StringVar(&clientKeyPath, "tls-key-path", "/etc/konnectivity/proxy-client/tls.key", "The path to the konnectivity client's private key.") + cmd.Flags().StringVar(&opts.CAFile, "ca-cert-path", "/etc/konnectivity/proxy-ca/ca.crt", "The path to the konnectivity client's ca-cert.") + cmd.Flags().StringVar(&opts.ClientCertFile, "tls-cert-path", "/etc/konnectivity/proxy-client/tls.crt", "The path to the konnectivity client's tls certificate.") + cmd.Flags().StringVar(&opts.ClientKeyFile, "tls-key-path", "/etc/konnectivity/proxy-client/tls.key", "The path to the konnectivity client's private key.") cmd.Run = func(cmd *cobra.Command, args []string) { l.Info("Starting proxy", "version", version.String()) client, err := client.New(ctrl.GetConfigOrDie(), client.Options{}) if err != nil { - panic(err) + fmt.Fprintf(os.Stderr, "Error: cannot get client: %v", err) + os.Exit(1) } - // shouldDNSFallback is modified in runtime by the '(d proxyResolver) Resolve' and dialDirectWithoutProxy functions. 
- dnsFallbackToMC := &dnsFallbackToManagementCluster{ - mutex: sync.RWMutex{}, - shouldDNSFallback: false, + opts.Client = client + opts.Log = l + + dialer, err := konnectivityproxy.NewKonnectivityDialer(opts) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: cannot initialize konnectivity dialer: %v", err) + os.Exit(1) } - dialFunc := dialFunc(caCertPath, clientCertPath, clientKeyPath, proxyHostname, proxyPort, connectDirectlyToCloudAPIs, resolveFromManagementClusterDNS, dnsFallbackToMC) conf := &socks5.Config{ - Dial: dialFunc, - Resolver: proxyResolver{ - client: client, - resolveFromGuestCluster: resolveFromGuestClusterDNS, - resolveFromManagementCluster: resolveFromManagementClusterDNS, - dnsFallback: dnsFallbackToMC, - guestClusterResolver: &guestClusterResolver{ - log: l, - client: client, - konnectivityDialFunc: dialFunc, - }, - log: l, - }, + Dial: dialer.DialContext, + Resolver: dialer, } server, err := socks5.New(conf) if err != nil { @@ -107,234 +80,3 @@ func NewStartCommand() *cobra.Command { return cmd } - -// dialFunc returns the appropriate dial function based on user and proxy setting configurations -func dialFunc(caCertPath string, clientCertPath string, clientKeyPath string, proxyHostname string, proxyPort int, connectDirectlyToCloudApis bool, resolveFromManagementClusterDNS bool, dnsFallbackToMC *dnsFallbackToManagementCluster) func(ctx context.Context, network string, addr string) (net.Conn, error) { - return func(ctx context.Context, network string, requestAddress string) (net.Conn, error) { - // return a dial direct function which respects any proxy environment settings - if connectDirectlyToCloudApis && isCloudAPI(strings.Split(requestAddress, ":")[0]) { - return dialDirectWithProxy(ctx, network, requestAddress) - } - - // return a dial direct function ignoring any proxy environment settings - shouldDNSFallback := dnsFallbackToMC.get() - if shouldDNSFallback && resolveFromManagementClusterDNS { - return dialDirectWithoutProxy(ctx, network, 
requestAddress, dnsFallbackToMC) - } - - // get a TLS config based on x509 certs - tlsConfig, err := util.GetClientTLSConfig(caCertPath, clientCertPath, clientKeyPath, proxyHostname, nil) - if err != nil { - return nil, err - } - - // connect to the proxy address and get a TLS connection - proxyAddress := fmt.Sprintf("%s:%d", proxyHostname, proxyPort) - proxyConn, err := tls.Dial("tcp", proxyAddress, tlsConfig) - if err != nil { - return nil, fmt.Errorf("dialing proxy %q failed: %v", proxyAddress, err) - } - connectString := fmt.Sprintf("CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", requestAddress, "127.0.0.1") - _, err = fmt.Fprintf(proxyConn, "%s", connectString) - if err != nil { - return nil, err - } - - // read HTTP response and return the connection - br := bufio.NewReader(proxyConn) - res, err := http.ReadResponse(br, nil) - if err != nil { - return nil, fmt.Errorf("reading HTTP response from CONNECT to %s via proxy %s failed: %v", - requestAddress, proxyAddress, err) - } - if res.StatusCode != 200 { - return nil, fmt.Errorf("proxy error from %s while dialing %s: %v", proxyAddress, requestAddress, res.Status) - } - // It's safe to discard the bufio.Reader here and return the original TCP conn directly because we only use this - // for TLS. In TLS, the client speaks first, so we know there's no unbuffered data, but we can double-check. 
- if br.Buffered() > 0 { - return nil, fmt.Errorf("unexpected %d bytes of buffered data from CONNECT proxy %q", - br.Buffered(), proxyAddress) - } - return proxyConn, nil - } -} - -// dialDirectWithoutProxy directly connect to the target, ignoring any local proxy settings from the environment -func dialDirectWithoutProxy(ctx context.Context, network, addr string, dnsFallbackToMC *dnsFallbackToManagementCluster) (net.Conn, error) { - var d = net.Dialer{ - Timeout: 2 * time.Minute, - } - connection, err := d.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - dnsFallbackToMC.set(false) - return connection, nil -} - -// dialDirectWithProxy directly connect to the target, respecting any local proxy settings from the environment -func dialDirectWithProxy(ctx context.Context, network, addr string) (net.Conn, error) { - return proxy.Dial(ctx, network, addr) -} - -type dnsFallbackToManagementCluster struct { - shouldDNSFallback bool - mutex sync.RWMutex -} - -func (f *dnsFallbackToManagementCluster) get() bool { - f.mutex.RLock() - defer f.mutex.RUnlock() - value := f.shouldDNSFallback - return value -} - -func (f *dnsFallbackToManagementCluster) set(valueToSet bool) { - f.mutex.Lock() - defer f.mutex.Unlock() - f.shouldDNSFallback = valueToSet -} - -type guestClusterResolver struct { - log logr.Logger - client client.Client - konnectivityDialFunc func(ctx context.Context, network string, addr string) (net.Conn, error) - resolver *net.Resolver - resolverLock sync.Mutex -} - -func (gr *guestClusterResolver) getResolver(ctx context.Context) (*net.Resolver, error) { - gr.resolverLock.Lock() - defer gr.resolverLock.Unlock() - if gr.resolver != nil { - return gr.resolver, nil - } - dnsService := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "openshift-dns", Name: "dns-default"}} - if err := gr.client.Get(ctx, client.ObjectKeyFromObject(dnsService), dnsService); err != nil { - return nil, fmt.Errorf("failed to get dns service from guest cluster: 
%w", err) - } - dnsIP := dnsService.Spec.ClusterIP - if net.ParseIP(dnsIP) != nil && strings.Contains(dnsIP, ":") && !strings.HasPrefix(dnsIP, "[") { - dnsIP = fmt.Sprintf("[%s]", dnsIP) - } - clusterDNSAddress := dnsIP + ":53" - gr.resolver = &net.Resolver{ - PreferGo: true, - Dial: func(ctx context.Context, network, address string) (net.Conn, error) { - return gr.konnectivityDialFunc(ctx, "tcp", clusterDNSAddress) - }, - } - - return gr.resolver, nil -} - -func (gr *guestClusterResolver) resolve(ctx context.Context, name string) (net.IP, error) { - resolver, err := gr.getResolver(ctx) - if err != nil { - return nil, fmt.Errorf("failed to get resolver: %w", err) - - } - addresses, err := resolver.LookupHost(ctx, name) - if err != nil { - return nil, fmt.Errorf("failed to resolve %q: %w", name, err) - } - if len(addresses) == 0 { - return nil, errors.New("no addresses found") - } - address := net.ParseIP(addresses[0]) - if address == nil { - return nil, fmt.Errorf("failed to parse address %q as IP", addresses[0]) - } - return address, nil -} - -// proxyResolver tries to resolve addresses using the following steps in order: -// 1. Not at all for cloud provider apis, as we do not want to tunnel them through Konnectivity. -// 2. If the address is a valid Kubernetes service and that service exists in the guest cluster, it's clusterIP is returned. -// 3. If --resolve-from-guest-cluster-dns is set, it uses the guest clusters dns. If that fails, fallback to the management cluster's resolution. -// 4. Lastly, Golang's default resolver is used. 
-type proxyResolver struct { - client client.Client - resolveFromGuestCluster bool - resolveFromManagementCluster bool - dnsFallback *dnsFallbackToManagementCluster - guestClusterResolver *guestClusterResolver - log logr.Logger -} - -func (d proxyResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) { - // Preserve the host so we can recognize it - if isCloudAPI(name) { - return ctx, nil, nil - } - l := d.log.WithValues("name", name) - _, ip, err := d.ResolveK8sService(ctx, l, name) - if err != nil { - l.Info("failed to resolve address from Kubernetes service", "err", err.Error()) - if !d.resolveFromGuestCluster { - return socks5.DNSResolver{}.Resolve(ctx, name) - } - - l.Info("looking up address from guest cluster cluster-dns") - address, err := d.guestClusterResolver.resolve(ctx, name) - if err != nil { - l.Error(err, "failed to look up address from guest cluster") - - if d.resolveFromManagementCluster { - l.Info("Fallback to management cluster resolution") - d.dnsFallback.set(true) - return ctx, nil, nil - } - - return ctx, nil, fmt.Errorf("failed to look up name %s from guest cluster cluster-dns: %w", name, err) - } - - l.WithValues("address", address.String()).Info("Successfully looked up address from guest cluster") - return ctx, address, nil - } - - return ctx, ip, nil -} - -func (d proxyResolver) ResolveK8sService(ctx context.Context, l logr.Logger, name string) (context.Context, net.IP, error) { - namespaceNamedService := strings.Split(name, ".") - if len(namespaceNamedService) < 2 { - return nil, nil, fmt.Errorf("unable to derive namespacedName from %v", name) - } - namespacedName := types.NamespacedName{ - Namespace: namespaceNamedService[1], - Name: namespaceNamedService[0], - } - - service := &corev1.Service{} - err := d.client.Get(ctx, namespacedName, service) - if err != nil { - return nil, nil, err - } - - // Convert service name to ip address... 
- ip := net.ParseIP(service.Spec.ClusterIP) - if ip == nil { - return nil, nil, fmt.Errorf("unable to parse IP %v", ip) - } - - l.Info("resolved address from Kubernetes service", "ip", ip.String()) - - return ctx, ip, nil -} - -// isCloudAPI is a hardcoded list of domains that should not be routed through Konnectivity but be reached -// through the management cluster. This is needed to support management clusters with a proxy configuration, -// as the components themselves already have proxy env vars pointing to the socks proxy (this binary). If we then -// actually end up proxying or not depends on the env for this binary. -// DNS domains. The API list can be found below: -// AWS: https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints -// AZURE: https://docs.microsoft.com/en-us/rest/api/azure/#how-to-call-azure-rest-apis-with-curl -// IBMCLOUD: https://cloud.ibm.com/apidocs/iam-identity-token-api#endpoints -func isCloudAPI(host string) bool { - return strings.HasSuffix(host, ".amazonaws.com") || - strings.HasSuffix(host, ".microsoftonline.com") || - strings.HasSuffix(host, "azure.com") || - strings.HasSuffix(host, "cloud.ibm.com") -} diff --git a/pkg/etcdcli/etcdcli.go b/pkg/etcdcli/etcdcli.go new file mode 100644 index 0000000000..f57c36da46 --- /dev/null +++ b/pkg/etcdcli/etcdcli.go @@ -0,0 +1,455 @@ +package etcdcli + +import ( + "context" + "fmt" + "io/ioutil" + "net/url" + "os" + "strings" + "time" + + grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/openshift/library-go/pkg/operator/events" + "go.etcd.io/etcd/api/v3/etcdserverpb" + "go.etcd.io/etcd/client/pkg/v3/logutil" + "go.etcd.io/etcd/client/pkg/v3/transport" + clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/server/v3/etcdserver" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/grpclog" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog/v2" +) + +const ( + DefaultDialTimeout 
= 15 * time.Second + DefragDialTimeout = 60 * time.Second + DefaultClientTimeout = 30 * time.Second +) + +type etcdClientGetter struct { + eventRecorder events.Recorder + + clientPool *EtcdClientPool +} + +func NewEtcdClient(endpointsFunc func() ([]string, error), eventRecorder events.Recorder) EtcdClient { + g := &etcdClientGetter{ + eventRecorder: eventRecorder.WithComponentSuffix("etcd-client"), + } + newFunc := func() (*clientv3.Client, error) { + endpoints, err := endpointsFunc() + if err != nil { + return nil, fmt.Errorf("error retrieving endpoints for new cached client: %w", err) + } + return newEtcdClientWithClientOpts(endpoints, true) + } + + g.clientPool = NewDefaultEtcdClientPool(newFunc, endpointsFunc) + return g +} + +// newEtcdClientWithClientOpts allows customization of the etcd client using ClientOptions. All clients must be manually +// closed by the caller with Close(). +func newEtcdClientWithClientOpts(endpoints []string, skipConnectionTest bool, opts ...ClientOption) (*clientv3.Client, error) { + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, os.Stderr)) + clientOpts, err := newClientOpts(opts...) + if err != nil { + return nil, fmt.Errorf("error during clientOpts: %w", err) + } + + dialOptions := []grpc.DialOption{ + grpc.WithBlock(), // block until the underlying connection is up + grpc.WithChainUnaryInterceptor(grpcprom.UnaryClientInterceptor), + grpc.WithChainStreamInterceptor(grpcprom.StreamClientInterceptor), + } + + // IAN: these are hypershift specific locations. 
+ tlsInfo := transport.TLSInfo{ + CertFile: "/etc/etcd/tls/client/etcd-client.crt", + KeyFile: "/etc/etcd/tls/client/etcd-client.key", + TrustedCAFile: "/etc/etcd/tls/etcd-ca/ca.crt", + } + tlsConfig, err := tlsInfo.ClientConfig() + if err != nil { + return nil, fmt.Errorf("error during client TLSConfig: %w", err) + } + + // Our logs are noisy + lcfg := logutil.DefaultZapLoggerConfig + lcfg.Level = zap.NewAtomicLevelAt(zap.ErrorLevel) + l, err := lcfg.Build() + if err != nil { + return nil, fmt.Errorf("failed building client logger: %w", err) + } + + cfg := &clientv3.Config{ + DialOptions: dialOptions, + Endpoints: endpoints, + DialTimeout: clientOpts.dialTimeout, + TLS: tlsConfig, + Logger: l, + } + + cli, err := clientv3.New(*cfg) + if err != nil { + return nil, fmt.Errorf("failed to make etcd client for endpoints %v: %w", endpoints, err) + } + + // If the endpoint includes a learner member then we skip the test + // as learner members don't support member list + if skipConnectionTest { + return cli, err + } + + // Test client connection. 
+ ctx, cancel := context.WithTimeout(context.Background(), DefaultClientTimeout) + defer cancel() + _, err = cli.MemberList(ctx) + if err != nil { + if clientv3.IsConnCanceled(err) { + return nil, fmt.Errorf("client connection was canceled: %w", err) + } + return nil, fmt.Errorf("error during client connection check: %w", err) + } + + return cli, err +} + +func (g *etcdClientGetter) MemberAddAsLearner(ctx context.Context, peerURL string) error { + cli, err := g.clientPool.Get() + if err != nil { + return err + } + + defer g.clientPool.Return(cli) + + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + membersResp, err := cli.MemberList(ctx) + if err != nil { + return err + } + + for _, member := range membersResp.Members { + for _, currPeerURL := range member.PeerURLs { + if currPeerURL == peerURL { + g.eventRecorder.Warningf("MemberAlreadyAdded", "member with peerURL %s already part of the cluster", peerURL) + return nil + } + } + } + + defer func() { + if err != nil { + g.eventRecorder.Warningf("MemberAddAsLearner", "failed to add new member %s: %v", peerURL, err) + } else { + g.eventRecorder.Eventf("MemberAddAsLearner", "successfully added new member %s", peerURL) + } + }() + + _, err = cli.MemberAddAsLearner(ctx, []string{peerURL}) + return err +} + +func (g *etcdClientGetter) MemberPromote(ctx context.Context, member *etcdserverpb.Member) error { + cli, err := g.clientPool.Get() + if err != nil { + return err + } + + defer g.clientPool.Return(cli) + + defer func() { + if err != nil { + // Not being ready for promotion can be a common event until the learner's log + // catches up with the leader, so we don't emit events for failing for that case + if err.Error() == etcdserver.ErrLearnerNotReady.Error() { + return + } + g.eventRecorder.Warningf("MemberPromote", "failed to promote learner member %s: %v", member.PeerURLs[0], err) + } else { + g.eventRecorder.Eventf("MemberPromote", "successfully promoted learner member %v", 
member.PeerURLs[0]) + } + }() + + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + _, err = cli.MemberPromote(ctx, member.ID) + return err +} + +func (g *etcdClientGetter) MemberUpdatePeerURL(ctx context.Context, id uint64, peerURLs []string) error { + if members, err := g.MemberList(ctx); err != nil { + g.eventRecorder.Eventf("MemberUpdate", "updating member %d with peers %v", id, strings.Join(peerURLs, ",")) + } else { + memberName := fmt.Sprintf("%d", id) + for _, member := range members { + if member.ID == id { + memberName = member.Name + break + } + } + g.eventRecorder.Eventf("MemberUpdate", "updating member %q with peers %v", memberName, strings.Join(peerURLs, ",")) + } + + cli, err := g.clientPool.Get() + if err != nil { + return err + } + + defer g.clientPool.Return(cli) + + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + _, err = cli.MemberUpdate(ctx, id, peerURLs) + if err != nil { + return err + } + return err +} + +func (g *etcdClientGetter) MemberRemove(ctx context.Context, memberID uint64) error { + cli, err := g.clientPool.Get() + if err != nil { + return err + } + + defer g.clientPool.Return(cli) + + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + _, err = cli.MemberRemove(ctx, memberID) + if err == nil { + g.eventRecorder.Eventf("MemberRemove", "removed member with ID: %v", memberID) + } + return err +} + +func (g *etcdClientGetter) MemberList(ctx context.Context) ([]*etcdserverpb.Member, error) { + cli, err := g.clientPool.Get() + if err != nil { + return nil, err + } + + defer g.clientPool.Return(cli) + + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + membersResp, err := cli.MemberList(ctx) + if err != nil { + return nil, err + } + + return membersResp.Members, nil +} + +func (g *etcdClientGetter) VotingMemberList(ctx context.Context) ([]*etcdserverpb.Member, error) { + members, err := g.MemberList(ctx) + if err != 
nil { + return nil, err + } + return filterVotingMembers(members), nil +} + +// Status reports etcd endpoint status of client URL target. Example https://10.0.10.1:2379 +func (g *etcdClientGetter) Status(ctx context.Context, clientURL string) (*clientv3.StatusResponse, error) { + cli, err := g.clientPool.Get() + if err != nil { + return nil, err + } + + defer g.clientPool.Return(cli) + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + return cli.Status(ctx, clientURL) +} + +func (g *etcdClientGetter) GetMember(ctx context.Context, name string) (*etcdserverpb.Member, error) { + members, err := g.MemberList(ctx) + if err != nil { + return nil, err + } + for _, m := range members { + if m.Name == name { + return m, nil + } + } + return nil, apierrors.NewNotFound(schema.GroupResource{Group: "etcd.operator.openshift.io", Resource: "etcdmembers"}, name) +} + +// GetMemberNameOrHost If the member's name is not set, extract ip/hostname from peerURL. Useful with unstarted members. 
+func GetMemberNameOrHost(member *etcdserverpb.Member) string { + if len(member.Name) == 0 { + u, err := url.Parse(member.PeerURLs[0]) + if err != nil { + klog.Errorf("unstarted member has invalid peerURL: %#v", err) + return "NAME-PENDING-BAD-PEER-URL" + } + return fmt.Sprintf("NAME-PENDING-%s", u.Hostname()) + } + return member.Name +} + +func (g *etcdClientGetter) UnhealthyMembers(ctx context.Context) ([]*etcdserverpb.Member, error) { + cli, err := g.clientPool.Get() + if err != nil { + return nil, err + } + + defer g.clientPool.Return(cli) + + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + etcdCluster, err := cli.MemberList(ctx) + if err != nil { + return nil, fmt.Errorf("could not get member list %v", err) + } + + memberHealth := getMemberHealth(ctx, etcdCluster.Members) + + unstartedMemberNames := GetUnstartedMemberNames(memberHealth) + if len(unstartedMemberNames) > 0 { + g.eventRecorder.Warningf("UnstartedEtcdMember", "unstarted members: %v", strings.Join(unstartedMemberNames, ",")) + } + + unhealthyMemberNames := GetUnhealthyMemberNames(memberHealth) + if len(unhealthyMemberNames) > 0 { + g.eventRecorder.Warningf("UnhealthyEtcdMember", "unhealthy members: %v", strings.Join(unhealthyMemberNames, ",")) + } + + return memberHealth.GetUnhealthyMembers(), nil +} + +func (g *etcdClientGetter) UnhealthyVotingMembers(ctx context.Context) ([]*etcdserverpb.Member, error) { + unhealthyMembers, err := g.UnhealthyMembers(ctx) + if err != nil { + return nil, err + } + return filterVotingMembers(unhealthyMembers), nil +} + +// HealthyMembers performs health check of current members and returns a slice of healthy members and error +// if no healthy members found. 
+func (g *etcdClientGetter) HealthyMembers(ctx context.Context) ([]*etcdserverpb.Member, error) { + cli, err := g.clientPool.Get() + if err != nil { + return nil, err + } + + defer g.clientPool.Return(cli) + + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + etcdCluster, err := cli.MemberList(ctx) + if err != nil { + return nil, err + } + + healthyMembers := getMemberHealth(ctx, etcdCluster.Members).GetHealthyMembers() + if len(healthyMembers) == 0 { + return nil, fmt.Errorf("no healthy etcd members found") + } + + return healthyMembers, nil +} + +func (g *etcdClientGetter) HealthyVotingMembers(ctx context.Context) ([]*etcdserverpb.Member, error) { + healthyMembers, err := g.HealthyMembers(ctx) + if err != nil { + return nil, err + } + return filterVotingMembers(healthyMembers), nil +} + +func (g *etcdClientGetter) MemberHealth(ctx context.Context) (memberHealth, error) { + cli, err := g.clientPool.Get() + if err != nil { + return nil, err + } + + defer g.clientPool.Return(cli) + + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + etcdCluster, err := cli.MemberList(ctx) + if err != nil { + return nil, err + } + return getMemberHealth(ctx, etcdCluster.Members), nil +} + +func (g *etcdClientGetter) IsMemberHealthy(ctx context.Context, member *etcdserverpb.Member) (bool, error) { + if member == nil { + return false, fmt.Errorf("member can not be nil") + } + memberHealth := getMemberHealth(ctx, []*etcdserverpb.Member{member}) + if len(memberHealth) == 0 { + return false, fmt.Errorf("member health check failed") + } + if memberHealth[0].Healthy { + return true, nil + } + + return false, nil +} + +func (g *etcdClientGetter) MemberStatus(ctx context.Context, member *etcdserverpb.Member) string { + cli, err := g.clientPool.Get() + if err != nil { + klog.Errorf("error getting etcd client: %#v", err) + return EtcdMemberStatusUnknown + } + defer g.clientPool.Return(cli) + + if len(member.ClientURLs) == 0 && 
member.Name == "" { + return EtcdMemberStatusNotStarted + } + ctx, cancel := context.WithTimeout(ctx, DefaultClientTimeout) + defer cancel() + _, err = cli.Status(ctx, member.ClientURLs[0]) + if err != nil { + klog.Errorf("error getting etcd member %s status: %#v", member.Name, err) + return EtcdMemberStatusUnhealthy + } + + return EtcdMemberStatusAvailable +} + +// Defragment creates a new uncached clientv3 to the given member url and calls clientv3.Client.Defragment. +func (g *etcdClientGetter) Defragment(ctx context.Context, member *etcdserverpb.Member) (*clientv3.DefragmentResponse, error) { + // no g.clientLock necessary, this always returns a new fresh client + cli, err := newEtcdClientWithClientOpts([]string{member.ClientURLs[0]}, false, WithDialTimeout(DefragDialTimeout)) + if err != nil { + return nil, fmt.Errorf("failed to get etcd client for defragment: %w", err) + } + defer func() { + if cli == nil { + return + } + if err := cli.Close(); err != nil { + klog.Errorf("error closing etcd client for defrag: %v", err) + } + }() + + resp, err := cli.Defragment(ctx, member.ClientURLs[0]) + if err != nil { + return nil, fmt.Errorf("error while running defragment: %w", err) + } + return resp, nil +} + +// filterVotingMembers filters out learner members +func filterVotingMembers(members []*etcdserverpb.Member) []*etcdserverpb.Member { + var votingMembers []*etcdserverpb.Member + for _, member := range members { + if member.IsLearner { + continue + } + votingMembers = append(votingMembers, member) + } + return votingMembers +} diff --git a/pkg/etcdcli/etcdcli_opts.go b/pkg/etcdcli/etcdcli_opts.go new file mode 100644 index 0000000000..0338592acd --- /dev/null +++ b/pkg/etcdcli/etcdcli_opts.go @@ -0,0 +1,31 @@ +package etcdcli + +import ( + "time" +) + +type ClientOptions struct { + dialTimeout time.Duration +} + +func newClientOpts(opts ...ClientOption) (*ClientOptions, error) { + clientOpts := &ClientOptions{ + dialTimeout: DefaultDialTimeout, + } + 
clientOpts.applyOpts(opts) + return clientOpts, nil +} + +func (co *ClientOptions) applyOpts(opts []ClientOption) { + for _, opt := range opts { + opt(co) + } +} + +type ClientOption func(*ClientOptions) + +func WithDialTimeout(timeout time.Duration) ClientOption { + return func(co *ClientOptions) { + co.dialTimeout = timeout + } +} diff --git a/pkg/etcdcli/etcdcli_pool.go b/pkg/etcdcli/etcdcli_pool.go new file mode 100644 index 0000000000..959ec2874e --- /dev/null +++ b/pkg/etcdcli/etcdcli_pool.go @@ -0,0 +1,184 @@ +package etcdcli + +import ( + "context" + "fmt" + "reflect" + "sort" + "time" + + clientv3 "go.etcd.io/etcd/client/v3" + "k8s.io/klog/v2" +) + +// EtcdClientPool fulfills these requirements: +// * cache clients to avoid re-creating them all the time (TLS handshakes are expensive after all) +// * return an exclusively unused client, no other can acquire the same client at that time +// * health checking a client before using it (using list), return a new one if unhealthy and closing the old one +// * update endpoints, to be always up to date with the changes +// * return a used client to the pool, making it available to consume again +type EtcdClientPool struct { + pool chan *clientv3.Client + availableOpenClients chan int + + newFunc func() (*clientv3.Client, error) + endpointsFunc func() ([]string, error) + healthFunc func(*clientv3.Client) error + closeFunc func(*clientv3.Client) error +} + +const retries = 3 + +// have some small linear retries of 2s * retry in order to fail gracefully +const linearRetryBaseSleep = 2 * time.Second + +// that controls the channel size, which controls how many unused clients we are keeping in buffer +const maxNumCachedClients = 5 + +// that controls how many clients are being created, you need to have a free message in the availableOpenClients channel to create a client +// this protects etcd from being hit by too many clients at once, eg when it is down or recovering or hit by lots of QPS +const maxNumOpenClients = 
10 +const maxAcquireTime = 5 * time.Second + +// Get returns a client that can be used exclusively by the caller, +// the caller must not close the client but return it using Return. +// This is intentionally not a fast operation, Get will ensure the client returned will be healthy and retries on errors. +// If no client is available, this method will block intentionally to protect etcd from being overwhelmed by too many clients at once. +func (p *EtcdClientPool) Get() (*clientv3.Client, error) { + desiredEndpoints, err := p.endpointsFunc() + if err != nil { + return nil, fmt.Errorf("getting cache client could not retrieve endpoints: %w", err) + } + + // retrying this a few times until the caller gets a healthy client + for i := 0; i < retries; i++ { + if i != 0 { + time.Sleep(linearRetryBaseSleep * time.Duration(i)) + } + + var client *clientv3.Client + select { + case client = <-p.pool: + default: + // blocks the creation when there are too many clients, after timeout we reject the request immediately without retry + select { + case <-p.availableOpenClients: + case <-time.After(maxAcquireTime): + return nil, fmt.Errorf("too many active cache clients, rejecting to create new one") + } + + klog.Infof("creating a new cached client") + c, err := p.newFunc() + if err != nil { + klog.Warningf("could not create a new cached client after %d tries, trying again. Err: %v", i, err) + returnClosedClient(p.availableOpenClients) + continue + } + + client = c + } + + // we're sorting as reflect.DeepEqual is depending on order + sort.Strings(desiredEndpoints) + currentEndpoints := client.Endpoints() + // client returns a defensive copy, so should be fine to sort in-place + sort.Strings(currentEndpoints) + if !reflect.DeepEqual(desiredEndpoints, currentEndpoints) { + klog.Warningf("cached client detected change in endpoints [%s] vs. 
[%s]", currentEndpoints, desiredEndpoints) + // normally we could just set the endpoints directly, but this allows us to add some useful logging + client.SetEndpoints(desiredEndpoints...) + } + + err = p.healthFunc(client) + if err != nil { + klog.Warningf("cached client considered unhealthy after %d tries, trying again. Err: %v", i, err) + // try to close the broken client and return the client to the pool + returnClosedClient(p.availableOpenClients) + err = p.closeFunc(client) + if err != nil { + klog.Errorf("could not close unhealthy cache client: %v", err) + } + continue + } + + return client, nil + } + + return nil, fmt.Errorf("giving up getting a cached client after %d tries", retries) +} + +// Return will make the given client available for other callers through Get again. +// When the underlying pool is filled it will close the client instead of waiting for a free spot. +func (p *EtcdClientPool) Return(client *clientv3.Client) { + if client == nil { + return + } + + select { + case p.pool <- client: + default: + returnClosedClient(p.availableOpenClients) + err := p.closeFunc(client) + if err != nil { + klog.Errorf("failed to close extra etcd client which is not being re-added in the client pool: %v", err) + } + } +} + +// returnClosedClient will attempt to return a client to the channel, but will not block when the channel is at capacity +func returnClosedClient(channel chan int) { + select { + case channel <- 1: + default: + } +} + +func NewDefaultEtcdClientPool(newFunc func() (*clientv3.Client, error), endpointsFunc func() ([]string, error)) *EtcdClientPool { + healthFunc := func(client *clientv3.Client) error { + if client == nil { + return fmt.Errorf("cached client was nil") + } + ctx, cancel := context.WithTimeout(context.Background(), DefaultClientTimeout) + defer cancel() + _, err := client.MemberList(ctx) + if err != nil { + if clientv3.IsConnCanceled(err) { + return fmt.Errorf("cache client health connection was canceled: %w", err) + } + return 
fmt.Errorf("error during cache client health connection check: %w", err) + } + return nil + } + + closeFunc := func(client *clientv3.Client) error { + if client == nil { + return nil + } + klog.Infof("closing cached client") + return client.Close() + } + + return NewEtcdClientPool(newFunc, endpointsFunc, healthFunc, closeFunc) +} + +func NewEtcdClientPool( + newFunc func() (*clientv3.Client, error), + endpointsFunc func() ([]string, error), + healthFunc func(*clientv3.Client) error, + closeFunc func(*clientv3.Client) error) *EtcdClientPool { + + // pre-populate clients for client creation + availableOpenClients := make(chan int, maxNumOpenClients) + for i := 0; i < maxNumOpenClients; i++ { + availableOpenClients <- i + } + + return &EtcdClientPool{ + pool: make(chan *clientv3.Client, maxNumCachedClients), + availableOpenClients: availableOpenClients, + newFunc: newFunc, + endpointsFunc: endpointsFunc, + healthFunc: healthFunc, + closeFunc: closeFunc, + } +} diff --git a/pkg/etcdcli/etcdcli_pool_test.go b/pkg/etcdcli/etcdcli_pool_test.go new file mode 100644 index 0000000000..df34d1e1d0 --- /dev/null +++ b/pkg/etcdcli/etcdcli_pool_test.go @@ -0,0 +1,433 @@ +package etcdcli + +import ( + "errors" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/tests/v3/integration" +) + +// rather poor men's approach to mocking +type clientPoolRecorder struct { + pool *EtcdClientPool + + numNewCalls int + numEndpointCalls int + numHealthCalls int + numCloseCalls int + + newFuncErrReturn error + healthCheckErrReturn error + endpointErrReturn error + closeFuncErrReturn error + updatedEndpoints []string +} + +func TestClientGetReturnHappyPath(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + + client, err := 
poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 1, poolRecorder.numNewCalls) + assert.Equal(t, 1, poolRecorder.numEndpointCalls) + assert.Equal(t, 1, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) +} + +func TestClientEndpointFailureReturnsImmediately(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + poolRecorder.endpointErrReturn = errors.New("fail") + + client, err := poolRecorder.pool.Get() + require.Error(t, err, "expected endpoint error fail, but got %w", err) + assert.Nil(t, client) + assert.Equal(t, 0, poolRecorder.numNewCalls) + assert.Equal(t, 1, poolRecorder.numEndpointCalls) + assert.Equal(t, 0, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) +} + +func TestClientDoubleGetReturnsNewClient(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 1, poolRecorder.numNewCalls) + assert.Equal(t, 1, poolRecorder.numEndpointCalls) + assert.Equal(t, 1, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) + // not returning the given client + client, err = poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 2, poolRecorder.numNewCalls) + assert.Equal(t, 2, poolRecorder.numEndpointCalls) + assert.Equal(t, 2, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) +} + +func TestClientReusesClientsReturned(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer 
testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 1, poolRecorder.numNewCalls) + assert.Equal(t, 1, poolRecorder.numEndpointCalls) + assert.Equal(t, 1, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) + poolRecorder.pool.Return(client) + client, err = poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 1, poolRecorder.numNewCalls) + assert.Equal(t, 2, poolRecorder.numEndpointCalls) + assert.Equal(t, 2, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) +} + +func TestClientClosesOnChannelCapacity(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + var clients []*clientv3.Client + for i := 0; i < maxNumCachedClients+1; i++ { + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + clients = append(clients, client) + } + + // returning all should make sure the last one tripping over capacity should get closed + for _, client := range clients { + poolRecorder.pool.Return(client) + } + + assert.Equal(t, maxNumCachedClients+1, poolRecorder.numNewCalls) + assert.Equal(t, maxNumCachedClients+1, poolRecorder.numEndpointCalls) + assert.Equal(t, maxNumCachedClients+1, poolRecorder.numHealthCalls) + assert.Equal(t, 1, poolRecorder.numCloseCalls) +} + +func TestNewClientWithOpenClients(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + var clients []*clientv3.Client + for i := 0; i < maxNumOpenClients; i++ { + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, 
client) + clients = append(clients, client) + } + + assert.Equal(t, maxNumOpenClients, poolRecorder.numNewCalls) + assert.Equal(t, maxNumOpenClients, poolRecorder.numEndpointCalls) + assert.Equal(t, maxNumOpenClients, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) + + // this should block and return an error + client, err := poolRecorder.pool.Get() + assert.Nil(t, client) + assert.Errorf(t, err, "too many active cache clients, rejecting to create new one") + // returning one should unlock the get again + poolRecorder.pool.Return(clients[0]) + client, err = poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, maxNumOpenClients, poolRecorder.numNewCalls) // no new call added + assert.Equal(t, maxNumOpenClients+2, poolRecorder.numEndpointCalls) // called Get twice additionally + assert.Equal(t, maxNumOpenClients+1, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) +} + +func TestClosesReturnOpenClients(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + var clients []*clientv3.Client + for i := 0; i < maxNumOpenClients; i++ { + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + clients = append(clients, client) + } + + assert.Equal(t, maxNumOpenClients, poolRecorder.numNewCalls) + assert.Equal(t, maxNumOpenClients, poolRecorder.numEndpointCalls) + assert.Equal(t, maxNumOpenClients, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) + + // return all clients to fill the internal cache and cause five to close + for i := 0; i < maxNumOpenClients; i++ { + poolRecorder.pool.Return(clients[i]) + } + assert.Equal(t, maxNumCachedClients, poolRecorder.numCloseCalls) + + // now we should be able to get the full amount of clients again + for i := 0; i < 
maxNumOpenClients; i++ { + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + } + + // replenish the maxNumCachedClients that were closed earlier + assert.Equal(t, maxNumOpenClients+maxNumCachedClients, poolRecorder.numNewCalls) + // no open clients are available anymore, as we have handed out all clients + assert.Equal(t, 0, len(poolRecorder.pool.availableOpenClients)) + assert.Equal(t, maxNumOpenClients*2, poolRecorder.numEndpointCalls) + assert.Equal(t, maxNumOpenClients*2, poolRecorder.numHealthCalls) + assert.Equal(t, maxNumCachedClients, poolRecorder.numCloseCalls) +} + +func TestClosesReturnOpenClientCloseError(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + var clients []*clientv3.Client + for i := 0; i < maxNumOpenClients; i++ { + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + clients = append(clients, client) + } + + assert.Equal(t, maxNumOpenClients, poolRecorder.numNewCalls) + assert.Equal(t, maxNumOpenClients, poolRecorder.numEndpointCalls) + assert.Equal(t, maxNumOpenClients, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) + + // return all clients to fill the internal cache and cause five to close + // first close should fail, but not impact anything and certainly not block + poolRecorder.closeFuncErrReturn = errors.New("fail") + for i := 0; i < maxNumOpenClients; i++ { + poolRecorder.pool.Return(clients[i]) + } + assert.Equal(t, maxNumCachedClients, poolRecorder.numCloseCalls) + + // now we should be able to get the full amount of clients again + for i := 0; i < maxNumOpenClients; i++ { + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + } + + // replenish the maxNumCachedClients that were closed earlier + assert.Equal(t, 
maxNumOpenClients+maxNumCachedClients, poolRecorder.numNewCalls) + assert.Equal(t, maxNumOpenClients*2, poolRecorder.numEndpointCalls) + assert.Equal(t, maxNumOpenClients*2, poolRecorder.numHealthCalls) + assert.Equal(t, maxNumCachedClients, poolRecorder.numCloseCalls) +} + +// this scenario used to lock-up etcd on start-up a lot, as the client does some initial connection testing that may fail +// eventually it will be exhausting all the openClient quota +func TestFailingOnCreationReturnsClients(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + // this should already happen at maxNumOpenClients/numRetries, so we're testing this is working pretty well here. + for i := 0; i < maxNumOpenClients; i++ { + // this error should fail the first retry consistently + poolRecorder.newFuncErrReturn = fmt.Errorf("constant error") + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + } + + // replenish the maxNumCachedClients that were closed earlier + assert.Equal(t, maxNumOpenClients*2, poolRecorder.numNewCalls) + assert.Equal(t, maxNumOpenClients, poolRecorder.numEndpointCalls) + assert.Equal(t, maxNumOpenClients, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) +} + +func TestClientClosesAndCreatesOnError(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 1, poolRecorder.numNewCalls) + assert.Equal(t, 1, poolRecorder.numEndpointCalls) + assert.Equal(t, 1, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) + poolRecorder.pool.Return(client) + + 
poolRecorder.healthCheckErrReturn = fmt.Errorf("some error") + + client, err = poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 2, poolRecorder.numNewCalls) + assert.Equal(t, 2, poolRecorder.numEndpointCalls) + // 3 calls, since we first test the returned client resulting in failure, then we test the new client + assert.Equal(t, 3, poolRecorder.numHealthCalls) + assert.Equal(t, 1, poolRecorder.numCloseCalls) +} + +func TestClientHealthCheckCloseErrorRetriesAndReturnsClient(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 1, poolRecorder.numNewCalls) + assert.Equal(t, 1, poolRecorder.numEndpointCalls) + assert.Equal(t, 1, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) + poolRecorder.pool.Return(client) + + poolRecorder.healthCheckErrReturn = fmt.Errorf("some health error") + poolRecorder.closeFuncErrReturn = fmt.Errorf("some close error") + + client, err = poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 2, poolRecorder.numNewCalls) + assert.Equal(t, 2, poolRecorder.numEndpointCalls) + // 3 calls, since we first test the returned client resulting in failure, then we test the new client + assert.Equal(t, 3, poolRecorder.numHealthCalls) + assert.Equal(t, 1, poolRecorder.numCloseCalls) +} + +func TestClientUpdatesEndpoints(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + client, err := poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, 1, poolRecorder.numNewCalls) + 
assert.Equal(t, 1, poolRecorder.numEndpointCalls) + assert.Equal(t, 1, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) + poolRecorder.pool.Return(client) + + // by default, we're using client 0 going to m0, client 1 should go to m1 + expectedEndpoints := testServer.Client(1).Endpoints() + poolRecorder.updatedEndpoints = expectedEndpoints + + client, err = poolRecorder.pool.Get() + require.NoError(t, err) + assert.NotNil(t, client) + assert.Equal(t, expectedEndpoints, client.Endpoints()) + assert.Equal(t, 1, poolRecorder.numNewCalls) + assert.Equal(t, 2, poolRecorder.numEndpointCalls) + assert.Equal(t, 2, poolRecorder.numHealthCalls) + assert.Equal(t, 0, poolRecorder.numCloseCalls) +} + +func TestClientOpenClientReturnNil(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + // should not panic + assert.NotPanics(t, func() { + poolRecorder.pool.Return(nil) + }) +} + +// we try to return many more clients than we actually handed out, this should fill the pool but not block when it's full +func TestClientOpenClientMultiReturns(t *testing.T) { + integration.BeforeTestExternal(t) + testServer := integration.NewClusterV3(t, &integration.ClusterConfig{Size: 3}) + defer testServer.Terminate(t) + + poolRecorder := newTestPool(testServer) + for i := 0; i < maxNumOpenClients*3; i++ { + poolRecorder.pool.Return(testServer.RandClient()) + } + assert.Equal(t, 0, poolRecorder.numNewCalls) + assert.Equal(t, 0, poolRecorder.numEndpointCalls) + assert.Equal(t, 0, poolRecorder.numHealthCalls) + assert.Equal(t, maxNumOpenClients*3-maxNumCachedClients, poolRecorder.numCloseCalls) +} + +func newTestPool(testServer *integration.ClusterV3) *clientPoolRecorder { + rec := &clientPoolRecorder{} + endpointFunc := func() ([]string, error) { + rec.numEndpointCalls++ + if rec.updatedEndpoints != nil { + 
endpoints := rec.updatedEndpoints + rec.updatedEndpoints = nil + return endpoints, nil + } + + if rec.endpointErrReturn != nil { + err := rec.endpointErrReturn + rec.endpointErrReturn = nil + return nil, err + } + + return testServer.Client(0).Endpoints(), nil + } + + newFunc := func() (*clientv3.Client, error) { + rec.numNewCalls++ + if rec.newFuncErrReturn != nil { + err := rec.newFuncErrReturn + rec.newFuncErrReturn = nil + return nil, err + } + return testServer.Client(0), nil + } + + healthFunc := func(client *clientv3.Client) error { + rec.numHealthCalls++ + err := rec.healthCheckErrReturn + rec.healthCheckErrReturn = nil + return err + } + + closeFunc := func(client *clientv3.Client) error { + rec.numCloseCalls++ + err := rec.closeFuncErrReturn + rec.closeFuncErrReturn = nil + return err + } + + rec.pool = NewEtcdClientPool(newFunc, endpointFunc, healthFunc, closeFunc) + return rec +} diff --git a/pkg/etcdcli/health.go b/pkg/etcdcli/health.go new file mode 100644 index 0000000000..75b459070f --- /dev/null +++ b/pkg/etcdcli/health.go @@ -0,0 +1,336 @@ +package etcdcli + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + "go.etcd.io/etcd/api/v3/etcdserverpb" + clientv3 "go.etcd.io/etcd/client/v3" + "k8s.io/component-base/metrics/legacyregistry" + klog "k8s.io/klog/v2" +) + +func init() { + legacyregistry.RawMustRegister(raftTerms) +} + +const raftTermsMetricName = "etcd_debugging_raft_terms_total" + +// raftTermsCollector is thread-safe internally +var raftTerms = &raftTermsCollector{ + desc: prometheus.NewDesc( + raftTermsMetricName, + "Number of etcd raft terms as observed by each member.", + []string{"member"}, + prometheus.Labels{}, + ), + terms: map[string]uint64{}, + lock: sync.RWMutex{}, +} + +type healthCheck struct { + Member *etcdserverpb.Member + Healthy bool + Took string + Error error +} + +type memberHealth []healthCheck + +func getMemberHealth(ctx context.Context, etcdMembers 
[]*etcdserverpb.Member) memberHealth { + memberHealth := memberHealth{} + for _, member := range etcdMembers { + if !HasStarted(member) { + memberHealth = append(memberHealth, healthCheck{Member: member, Healthy: false}) + continue + } + + const defaultTimeout = 30 * time.Second + resChan := make(chan healthCheck, 1) + go func() { + ctx, cancel := context.WithTimeout(ctx, defaultTimeout) + defer cancel() + + resChan <- checkSingleMemberHealth(ctx, member) + }() + + select { + case res := <-resChan: + memberHealth = append(memberHealth, res) + case <-time.After(defaultTimeout): + memberHealth = append(memberHealth, healthCheck{ + Member: member, + Healthy: false, + Error: fmt.Errorf("30s timeout waiting for member %s to respond to health check", + member.Name)}) + } + + close(resChan) + } + + // Purge any unknown members from the raft term metrics collector. + for _, cachedMember := range raftTerms.List() { + found := false + for _, member := range etcdMembers { + if member.Name == cachedMember { + found = true + break + } + } + + if !found { + // Forget is a map deletion underneath, which is idempotent and under a lock. + raftTerms.Forget(cachedMember) + } + } + + return memberHealth +} + +func checkSingleMemberHealth(ctx context.Context, member *etcdserverpb.Member) healthCheck { + // If the endpoint is for a learner member then we should skip testing the connection + // via the member list call as learners don't support that. 
+ // The learner's connection would get tested in the health check below + skipConnectionTest := false + if member.IsLearner { + skipConnectionTest = true + } + cli, err := newEtcdClientWithClientOpts([]string{member.ClientURLs[0]}, skipConnectionTest) + if err != nil { + return healthCheck{ + Member: member, + Healthy: false, + Error: fmt.Errorf("create client failure: %w", err)} + } + + defer func() { + if err := cli.Close(); err != nil { + klog.Errorf("error closing etcd client for getMemberHealth: %v", err) + } + }() + + st := time.Now() + + var resp *clientv3.GetResponse + if member.IsLearner { + // Learner members only support serializable (without consensus) read requests + resp, err = cli.Get(ctx, "health", clientv3.WithSerializable()) + } else { + // Linearized request to verify health of a voting member + resp, err = cli.Get(ctx, "health") + } + + hc := healthCheck{Member: member, Healthy: false, Took: time.Since(st).String()} + if err == nil { + if resp.Header != nil { + // TODO(thomas): this is a somewhat misplaced side-effect that is safe to call from multiple goroutines + raftTerms.Set(member.Name, resp.Header.RaftTerm) + } + hc.Healthy = true + } else { + klog.Errorf("health check for member (%v) failed: err(%v)", member.Name, err) + hc.Error = fmt.Errorf("health check failed: %w", err) + } + + return hc +} + +// Status returns a reporting of memberHealth status +func (h memberHealth) Status() string { + healthyMembers := h.GetHealthyMembers() + + status := []string{} + if len(h) == len(healthyMembers) { + status = append(status, fmt.Sprintf("%d members are available", len(h))) + } else { + status = append(status, fmt.Sprintf("%d of %d members are available", len(healthyMembers), len(h))) + for _, etcd := range h { + switch { + case !HasStarted(etcd.Member): + status = append(status, fmt.Sprintf("%s has not started", GetMemberNameOrHost(etcd.Member))) + break + case !etcd.Healthy: + status = append(status, fmt.Sprintf("%s is unhealthy", 
etcd.Member.Name)) + break + } + } + } + return strings.Join(status, ", ") +} + +// GetHealthyMembers returns healthy members +func (h memberHealth) GetHealthyMembers() []*etcdserverpb.Member { + members := []*etcdserverpb.Member{} + for _, etcd := range h { + if etcd.Healthy { + members = append(members, etcd.Member) + } + } + return members +} + +// GetUnhealthy returns unhealthy members +func (h memberHealth) GetUnhealthyMembers() []*etcdserverpb.Member { + members := []*etcdserverpb.Member{} + for _, etcd := range h { + if !etcd.Healthy { + members = append(members, etcd.Member) + } + } + return members +} + +// GetUnstarted returns unstarted members +func (h memberHealth) GetUnstartedMembers() []*etcdserverpb.Member { + members := []*etcdserverpb.Member{} + for _, etcd := range h { + if !HasStarted(etcd.Member) { + members = append(members, etcd.Member) + } + } + return members +} + +// GetUnhealthyMemberNames returns a list of unhealthy member names +func GetUnhealthyMemberNames(memberHealth []healthCheck) []string { + memberNames := []string{} + for _, etcd := range memberHealth { + if !etcd.Healthy { + memberNames = append(memberNames, GetMemberNameOrHost(etcd.Member)) + } + } + return memberNames +} + +// GetHealthyMemberNames returns a list of healthy member names +func GetHealthyMemberNames(memberHealth []healthCheck) []string { + memberNames := []string{} + for _, etcd := range memberHealth { + if etcd.Healthy { + memberNames = append(memberNames, etcd.Member.Name) + } + } + return memberNames +} + +// GetUnstartedMemberNames returns a list of unstarted member names +func GetUnstartedMemberNames(memberHealth []healthCheck) []string { + memberNames := []string{} + for _, etcd := range memberHealth { + if !HasStarted(etcd.Member) { + memberNames = append(memberNames, GetMemberNameOrHost(etcd.Member)) + } + } + return memberNames +} + +// HasStarted return true if etcd member has started. 
+func HasStarted(member *etcdserverpb.Member) bool { + if len(member.ClientURLs) == 0 { + return false + } + return true +} + +// IsQuorumFaultTolerant checks the current etcd cluster and returns true if the cluster can tolerate the +// loss of a single etcd member. Such loss is common during new static pod revision. +func IsQuorumFaultTolerant(memberHealth []healthCheck) bool { + totalMembers := len(memberHealth) + quorum, err := MinimumTolerableQuorum(totalMembers) + if err != nil { + klog.Errorf("etcd cluster could not determine minimum quorum required. total number of members is %v. minimum quorum required is %v: %s", totalMembers, quorum, err) + return false + } + healthyMembers := len(GetHealthyMemberNames(memberHealth)) + switch { + case totalMembers-quorum < 1: + klog.Errorf("etcd cluster has quorum of %d which is not fault tolerant: %+v", quorum, memberHealth) + return false + case healthyMembers-quorum < 1: + klog.Errorf("etcd cluster has quorum of %d and %d healthy members which is not fault tolerant: %+v", quorum, healthyMembers, memberHealth) + return false + } + return true +} + +// IsQuorumFaultTolerantErr is the same as IsQuorumFaultTolerant but with an error return instead of the log +func IsQuorumFaultTolerantErr(memberHealth []healthCheck) error { + totalMembers := len(memberHealth) + quorum, err := MinimumTolerableQuorum(totalMembers) + if err != nil { + return fmt.Errorf("etcd cluster could not determine minimum quorum required. total number of members is %v. 
minimum quorum required is %v: %w", totalMembers, quorum, err) + } + healthyMembers := len(GetHealthyMemberNames(memberHealth)) + switch { + case totalMembers-quorum < 1: + return fmt.Errorf("etcd cluster has quorum of %d which is not fault tolerant: %+v", quorum, memberHealth) + case healthyMembers-quorum < 1: + return fmt.Errorf("etcd cluster has quorum of %d and %d healthy members which is not fault tolerant: %+v", quorum, healthyMembers, memberHealth) + } + return nil +} + +func IsClusterHealthy(memberHealth memberHealth) bool { + unhealthyMembers := memberHealth.GetUnhealthyMembers() + if len(unhealthyMembers) > 0 { + return false + } + return true +} + +// raftTermsCollector is a Prometheus collector to re-expose raft terms as a counter. +type raftTermsCollector struct { + desc *prometheus.Desc + terms map[string]uint64 + lock sync.RWMutex +} + +func (c *raftTermsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.desc +} + +func (c *raftTermsCollector) Set(member string, value uint64) { + c.lock.Lock() + defer c.lock.Unlock() + c.terms[member] = value +} + +func (c *raftTermsCollector) Forget(member string) { + c.lock.Lock() + defer c.lock.Unlock() + delete(c.terms, member) +} + +func (c *raftTermsCollector) List() []string { + c.lock.RLock() + defer c.lock.RUnlock() + var members []string + for member := range c.terms { + members = append(members, member) + } + return members +} + +func (c *raftTermsCollector) Collect(ch chan<- prometheus.Metric) { + c.lock.RLock() + defer c.lock.RUnlock() + for member, val := range c.terms { + ch <- prometheus.MustNewConstMetric( + c.desc, + prometheus.CounterValue, + float64(val), + member, + ) + } +} + +func MinimumTolerableQuorum(members int) (int, error) { + if members <= 0 { + return 0, fmt.Errorf("invalid etcd member length: %v", members) + } + return (members / 2) + 1, nil +} diff --git a/pkg/etcdcli/health_test.go b/pkg/etcdcli/health_test.go new file mode 100644 index 0000000000..1ff8ef26b7 --- /dev/null 
+++ b/pkg/etcdcli/health_test.go @@ -0,0 +1,318 @@ +package etcdcli + +import ( + "fmt" + "github.com/stretchr/testify/require" + "reflect" + "testing" + + "go.etcd.io/etcd/api/v3/etcdserverpb" +) + +func TestMemberHealthStatus(t *testing.T) { + tests := []struct { + name string + memberHealth memberHealth + want string + }{ + { + "test all available members", + []healthCheck{ + healthyMember(1), + healthyMember(2), + healthyMember(3), + }, + "3 members are available", + }, + { + "test an unhealthy member", + []healthCheck{ + healthyMember(1), + healthyMember(2), + unHealthyMember(3), + }, + "2 of 3 members are available, etcd-3 is unhealthy", + }, + { + "test an unstarted member", + []healthCheck{ + healthyMember(1), + healthyMember(2), + unstartedMember(3), + }, + "2 of 3 members are available, NAME-PENDING-10.0.0.3 has not started", + }, + { + "test an unstarted member and an unhealthy member", + []healthCheck{ + healthyMember(1), + unHealthyMember(2), + unstartedMember(3), + }, + "1 of 3 members are available, etcd-2 is unhealthy, NAME-PENDING-10.0.0.3 has not started", + }, + { + "test two unhealthy members", + []healthCheck{ + healthyMember(1), + unHealthyMember(2), + unHealthyMember(3), + }, + "1 of 3 members are available, etcd-2 is unhealthy, etcd-3 is unhealthy", + }, + { + "test two unstarted members", + []healthCheck{ + healthyMember(1), + unstartedMember(2), + unstartedMember(3), + }, + "1 of 3 members are available, NAME-PENDING-10.0.0.2 has not started, NAME-PENDING-10.0.0.3 has not started", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.memberHealth.Status(); got != tt.want { + t.Errorf("test %q = %v, want %v", tt.name, got, tt.want) + } + }) + } +} + +func TestGetUnstartedMemberNames(t *testing.T) { + tests := []struct { + name string + memberHealth memberHealth + want []string + }{ + { + "test all available members", + []healthCheck{ + healthyMember(1), + healthyMember(2), + healthyMember(3), + }, + 
[]string{}, + }, + { + "test an unhealthy members", + []healthCheck{ + healthyMember(1), + healthyMember(2), + unHealthyMember(3), + }, + []string{}, + }, + { + "test an unstarted and an unhealthy member", + []healthCheck{ + unHealthyMember(1), + unstartedMember(2), + healthyMember(3), + }, + []string{"NAME-PENDING-10.0.0.2"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := GetUnstartedMemberNames(tt.memberHealth) + if !reflect.DeepEqual(tt.want, got) { + t.Errorf("test %q = %v, want %v", tt.name, got, tt.want) + } + }) + } +} + +func TestGetUnhealthyMemberNames(t *testing.T) { + tests := []struct { + name string + memberHealth memberHealth + want []string + }{ + { + "test all available members", + []healthCheck{ + healthyMember(1), + healthyMember(2), + healthyMember(3), + }, + []string{}, + }, + { + "test an unhealthy members", + []healthCheck{ + healthyMember(1), + healthyMember(2), + unHealthyMember(3), + }, + []string{"etcd-3"}, + }, + { + "test an unstarted member", + []healthCheck{ + healthyMember(1), + unstartedMember(2), + healthyMember(3), + }, + []string{"NAME-PENDING-10.0.0.2"}, + }, + { + "test an unstarted and an unhealthy member", + []healthCheck{ + unHealthyMember(1), + unstartedMember(2), + healthyMember(3), + }, + []string{"etcd-1", "NAME-PENDING-10.0.0.2"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := GetUnhealthyMemberNames(tt.memberHealth) + if !reflect.DeepEqual(tt.want, got) { + t.Errorf("test %q = %v, want %v", tt.name, got, tt.want) + } + }) + } +} + +func TestIsQuorumFaultTolerant(t *testing.T) { + tests := []struct { + name string + memberHealth memberHealth + want bool + }{ + { + "test all available members", + []healthCheck{ + healthyMember(1), + healthyMember(2), + healthyMember(3), + }, + true, + }, + { + "test an unhealthy members", + []healthCheck{ + healthyMember(1), + healthyMember(2), + unHealthyMember(3), + }, + false, + }, + { + "test an unstarted 
member", + []healthCheck{ + healthyMember(1), + unstartedMember(2), + healthyMember(3), + }, + false, + }, + { + "test an unstarted and an unhealthy member", + []healthCheck{ + unHealthyMember(1), + unstartedMember(2), + healthyMember(3), + }, + false, + }, + { + "test etcd cluster with less than 3 members", + []healthCheck{ + healthyMember(1), + healthyMember(2), + }, + false, + }, + { + "test empty health check", + []healthCheck{}, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsQuorumFaultTolerant(tt.memberHealth) + if got != tt.want { + t.Errorf("test %q = %v, want %v", tt.name, got, tt.want) + } + }) + } +} + +func unstartedMember(member int) healthCheck { + return healthCheck{ + Member: &etcdserverpb.Member{ + PeerURLs: []string{fmt.Sprintf("https://10.0.0.%d:2380", member)}, + }, + Healthy: false, + } +} +func healthyMember(member int) healthCheck { + return healthCheck{ + Member: &etcdserverpb.Member{ + Name: fmt.Sprintf("etcd-%d", member), + PeerURLs: []string{fmt.Sprintf("https://10.0.0.%d:2380", member)}, + ClientURLs: []string{fmt.Sprintf("https://10.0.0.%d:2379", member)}, + }, + Healthy: true, + } +} + +func TestMinimumTolerableQuorum(t *testing.T) { + + scenarios := []struct { + name string + input int + expErr error + exp int + }{ + { + name: "valid input `3`", + input: 3, + expErr: nil, + exp: 2, + }, + { + name: "valid input `5`", + input: 5, + expErr: nil, + exp: 3, + }, + { + name: "invalid input `0`", + input: 0, + expErr: fmt.Errorf("invalid etcd member length: %v", 0), + exp: 0, + }, + { + name: "invalid input `-10`", + input: -10, + expErr: fmt.Errorf("invalid etcd member length: %v", -10), + exp: 0, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + // act + actual, err := MinimumTolerableQuorum(scenario.input) + // assert + require.Equal(t, scenario.expErr, err) + require.Equal(t, scenario.exp, actual) + }) + } +} + +func unHealthyMember(member 
int) healthCheck { + return healthCheck{ + Member: &etcdserverpb.Member{ + Name: fmt.Sprintf("etcd-%d", member), + PeerURLs: []string{fmt.Sprintf("https://10.0.0.%d:2380", member)}, + ClientURLs: []string{fmt.Sprintf("https://10.0.0.%d:2379", member)}, + }, + Healthy: false, + } +} diff --git a/pkg/etcdcli/helpers.go b/pkg/etcdcli/helpers.go new file mode 100644 index 0000000000..e16f068779 --- /dev/null +++ b/pkg/etcdcli/helpers.go @@ -0,0 +1,259 @@ +package etcdcli + +import ( + "context" + "fmt" + + "go.etcd.io/etcd/api/v3/etcdserverpb" + clientv3 "go.etcd.io/etcd/client/v3" + v3membership "go.etcd.io/etcd/server/v3/etcdserver/api/membership" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type fakeEtcdClient struct { + members []*etcdserverpb.Member + opts *FakeClientOptions +} + +func (f *fakeEtcdClient) Defragment(ctx context.Context, member *etcdserverpb.Member) (*clientv3.DefragmentResponse, error) { + if len(f.opts.defragErrors) > 0 { + err := f.opts.defragErrors[0] + f.opts.defragErrors = f.opts.defragErrors[1:] + return nil, err + } + // dramatic simplification + f.opts.dbSize = f.opts.dbSizeInUse + return nil, nil +} + +func (f *fakeEtcdClient) Status(ctx context.Context, target string) (*clientv3.StatusResponse, error) { + for _, member := range f.members { + if member.ClientURLs[0] == target { + for _, status := range f.opts.status { + if status.Header.MemberId == member.ID { + return status, nil + } + } + return nil, fmt.Errorf("no status found for member %d matching target %q.", member.ID, target) + } + } + return nil, fmt.Errorf("status failed no match for target: %q", target) +} + +func (f *fakeEtcdClient) MemberAddAsLearner(ctx context.Context, peerURL string) error { + memberCount := len(f.members) + m := &etcdserverpb.Member{ + Name: fmt.Sprintf("m-%d", memberCount+1), + ID: uint64(memberCount + 1), + PeerURLs: []string{peerURL}, + IsLearner: true, + } + f.members = append(f.members, m) + return 
nil +} + +func (f *fakeEtcdClient) MemberPromote(ctx context.Context, member *etcdserverpb.Member) error { + var memberToPromote *etcdserverpb.Member + for _, m := range f.members { + if m.ID == member.ID { + memberToPromote = m + break + } + } + if memberToPromote == nil { + return fmt.Errorf("member with the given (ID: %d) and (name: %s) doesn't exist", member.ID, member.Name) + } + + if !memberToPromote.IsLearner { + return v3membership.ErrMemberNotLearner + } + + memberToPromote.IsLearner = false + return nil +} + +func (f *fakeEtcdClient) MemberList(ctx context.Context) ([]*etcdserverpb.Member, error) { + return f.members, nil +} + +func (f *fakeEtcdClient) VotingMemberList(ctx context.Context) ([]*etcdserverpb.Member, error) { + members, _ := f.MemberList(ctx) + return filterVotingMembers(members), nil +} + +func (f *fakeEtcdClient) MemberRemove(ctx context.Context, memberID uint64) error { + var memberExists bool + for _, m := range f.members { + if m.ID == memberID { + memberExists = true + break + } + } + if !memberExists { + return fmt.Errorf("member with the given ID: %d doesn't exist", memberID) + } + + var newMemberList []*etcdserverpb.Member + for _, m := range f.members { + if m.ID == memberID { + continue + } + newMemberList = append(newMemberList, m) + } + f.members = newMemberList + return nil +} + +func (f *fakeEtcdClient) MemberHealth(ctx context.Context) (memberHealth, error) { + var healthy, unhealthy int + var memberHealth memberHealth + for _, member := range f.members { + healthCheck := healthCheck{ + Member: member, + } + switch { + // if WithClusterHealth is not passed we default to all healthy + case f.opts.healthyMember == 0 && f.opts.unhealthyMember == 0: + healthCheck.Healthy = true + break + case f.opts.healthyMember > 0 && healthy < f.opts.healthyMember: + healthCheck.Healthy = true + healthy++ + break + case f.opts.unhealthyMember > 0 && unhealthy < f.opts.unhealthyMember: + healthCheck.Healthy = false + unhealthy++ + break + } + 
memberHealth = append(memberHealth, healthCheck) + } + return memberHealth, nil +} + +// IsMemberHealthy returns true if the number of etcd members equals the member of healthy members. +func (f *fakeEtcdClient) IsMemberHealthy(ctx context.Context, member *etcdserverpb.Member) (bool, error) { + return len(f.members) == f.opts.healthyMember, nil +} + +func (f *fakeEtcdClient) UnhealthyMembers(ctx context.Context) ([]*etcdserverpb.Member, error) { + if f.opts.unhealthyMember > 0 { + // unheathy start from beginning + return f.members[0:f.opts.unhealthyMember], nil + } + return []*etcdserverpb.Member{}, nil +} + +func (f *fakeEtcdClient) UnhealthyVotingMembers(ctx context.Context) ([]*etcdserverpb.Member, error) { + members, _ := f.UnhealthyMembers(ctx) + return filterVotingMembers(members), nil +} + +func (f *fakeEtcdClient) HealthyMembers(ctx context.Context) ([]*etcdserverpb.Member, error) { + if f.opts.healthyMember > 0 { + // healthy start from end + return f.members[f.opts.unhealthyMember:], nil + } + return []*etcdserverpb.Member{}, nil +} + +func (f *fakeEtcdClient) HealthyVotingMembers(ctx context.Context) ([]*etcdserverpb.Member, error) { + members, _ := f.HealthyMembers(ctx) + return filterVotingMembers(members), nil +} + +func (f *fakeEtcdClient) MemberStatus(ctx context.Context, member *etcdserverpb.Member) string { + panic("implement me") +} + +func (f *fakeEtcdClient) GetMember(ctx context.Context, name string) (*etcdserverpb.Member, error) { + for _, m := range f.members { + if m.Name == name { + return m, nil + } + } + return nil, apierrors.NewNotFound(schema.GroupResource{Group: "etcd.operator.openshift.io", Resource: "etcdmembers"}, name) +} + +func (f *fakeEtcdClient) MemberUpdatePeerURL(ctx context.Context, id uint64, peerURL []string) error { + panic("implement me") +} + +func NewFakeEtcdClient(members []*etcdserverpb.Member, opts ...FakeClientOption) (EtcdClient, error) { + status := make([]*clientv3.StatusResponse, len(members)) + 
fakeEtcdClient := &fakeEtcdClient{ + members: members, + opts: &FakeClientOptions{ + status: status, + }, + } + if opts != nil { + fcOpts := newFakeClientOpts(opts...) + switch { + // validate WithClusterHealth + case fcOpts.healthyMember > 0 || fcOpts.unhealthyMember > 0: + if fcOpts.healthyMember+fcOpts.unhealthyMember != len(members) { + return nil, fmt.Errorf("WithClusterHealth count must equal the numer of members: have %d, want %d ", fcOpts.unhealthyMember+fcOpts.healthyMember, len(members)) + } + } + fakeEtcdClient.opts = fcOpts + } + + return fakeEtcdClient, nil +} + +type FakeClientOptions struct { + client *clientv3.Client + unhealthyMember int + healthyMember int + status []*clientv3.StatusResponse + dbSize int64 + dbSizeInUse int64 + defragErrors []error +} + +func newFakeClientOpts(opts ...FakeClientOption) *FakeClientOptions { + fcOpts := &FakeClientOptions{} + fcOpts.applyFakeOpts(opts) + fcOpts.validateFakeOpts(opts) + return fcOpts +} + +func (fo *FakeClientOptions) applyFakeOpts(opts []FakeClientOption) { + for _, opt := range opts { + opt(fo) + } +} + +func (fo *FakeClientOptions) validateFakeOpts(opts []FakeClientOption) { + for _, opt := range opts { + opt(fo) + } +} + +type FakeClientOption func(*FakeClientOptions) + +type FakeMemberHealth struct { + Healthy int + Unhealthy int +} + +func WithFakeClusterHealth(members *FakeMemberHealth) FakeClientOption { + return func(fo *FakeClientOptions) { + fo.unhealthyMember = members.Unhealthy + fo.healthyMember = members.Healthy + } +} + +func WithFakeStatus(status []*clientv3.StatusResponse) FakeClientOption { + return func(fo *FakeClientOptions) { + fo.status = status + } +} + +// WithFakeDefragErrors configures each call to Defrag to consume one error from the given slice +func WithFakeDefragErrors(errors []error) FakeClientOption { + return func(fo *FakeClientOptions) { + fo.defragErrors = errors + } +} diff --git a/pkg/etcdcli/interfaces.go b/pkg/etcdcli/interfaces.go new file mode 100644 index 
0000000000..05c4efa800 --- /dev/null +++ b/pkg/etcdcli/interfaces.go @@ -0,0 +1,83 @@ +package etcdcli + +import ( + "context" + + "go.etcd.io/etcd/api/v3/etcdserverpb" + clientv3 "go.etcd.io/etcd/client/v3" +) + +const ( + EtcdMemberStatusAvailable = "EtcdMemberAvailable" + EtcdMemberStatusNotStarted = "EtcdMemberNotStarted" + EtcdMemberStatusUnhealthy = "EtcdMemberUnhealthy" + EtcdMemberStatusUnknown = "EtcdMemberUnknown" +) + +type EtcdClient interface { + Defragment + MemberAdder + MemberPromoter + MemberHealth + IsMemberHealthy + MemberLister + MemberRemover + HealthyMemberLister + UnhealthyMemberLister + MemberStatusChecker + Status + + GetMember(ctx context.Context, name string) (*etcdserverpb.Member, error) + MemberUpdatePeerURL(ctx context.Context, id uint64, peerURL []string) error +} + +type Defragment interface { + Defragment(ctx context.Context, member *etcdserverpb.Member) (*clientv3.DefragmentResponse, error) +} + +type Status interface { + Status(ctx context.Context, target string) (*clientv3.StatusResponse, error) +} + +type MemberAdder interface { + MemberAddAsLearner(ctx context.Context, peerURL string) error +} + +type MemberPromoter interface { + MemberPromote(ctx context.Context, member *etcdserverpb.Member) error +} + +type MemberHealth interface { + MemberHealth(ctx context.Context) (memberHealth, error) +} +type IsMemberHealthy interface { + IsMemberHealthy(ctx context.Context, member *etcdserverpb.Member) (bool, error) +} +type MemberRemover interface { + MemberRemove(ctx context.Context, memberID uint64) error +} + +type MemberLister interface { + // MemberList lists all members in a cluster + MemberList(ctx context.Context) ([]*etcdserverpb.Member, error) + // VotingMemberList lists all non learner members in a cluster + VotingMemberList(ctx context.Context) ([]*etcdserverpb.Member, error) +} + +type HealthyMemberLister interface { + // HealthyMembers lists all healthy members in a cluster + HealthyMembers(ctx context.Context) 
([]*etcdserverpb.Member, error) + // HealthyVotingMembers lists all non learner healthy members in a cluster + HealthyVotingMembers(ctx context.Context) ([]*etcdserverpb.Member, error) +} + +type UnhealthyMemberLister interface { + // UnhealthyMembers lists all unhealthy members in a cluster + UnhealthyMembers(ctx context.Context) ([]*etcdserverpb.Member, error) + // UnhealthyVotingMembers lists all non learner unhealthy members in a cluster + UnhealthyVotingMembers(ctx context.Context) ([]*etcdserverpb.Member, error) +} + +type MemberStatusChecker interface { + MemberStatus(ctx context.Context, member *etcdserverpb.Member) string +} diff --git a/product-cli/cmd/cluster/agent/create.go b/product-cli/cmd/cluster/agent/create.go index 0f21aee8e9..a13bdf7a1a 100644 --- a/product-cli/cmd/cluster/agent/create.go +++ b/product-cli/cmd/cluster/agent/create.go @@ -20,7 +20,7 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command { AgentNamespace: "", } - cmd.Flags().StringVar(&opts.AgentPlatform.APIServerAddress, "api-server-address", opts.AgentPlatform.APIServerAddress, "The API server address is the IP address for Kubernetes API communication") + cmd.Flags().StringVar(&opts.AgentPlatform.APIServerAddress, "api-server-address", opts.AgentPlatform.APIServerAddress, "The IP address to be used for the hosted cluster's Kubernetes API communication. 
Requires management cluster connectivity if left unset.") cmd.Flags().StringVar(&opts.AgentPlatform.AgentNamespace, "agent-namespace", opts.AgentPlatform.AgentNamespace, "The namespace in which to search for Agents") _ = cmd.MarkFlagRequired("agent-namespace") _ = cmd.MarkPersistentFlagRequired("pull-secret") diff --git a/product-cli/cmd/cluster/cluster.go b/product-cli/cmd/cluster/cluster.go index ba537ccd10..d37d952b35 100644 --- a/product-cli/cmd/cluster/cluster.go +++ b/product-cli/cmd/cluster/cluster.go @@ -11,13 +11,14 @@ import ( "github.com/openshift/hypershift/product-cli/cmd/cluster/agent" "github.com/openshift/hypershift/product-cli/cmd/cluster/aws" "github.com/openshift/hypershift/product-cli/cmd/cluster/kubevirt" + "github.com/openshift/hypershift/support/globalconfig" ) func NewCreateCommands() *cobra.Command { opts := &core.CreateOptions{ AdditionalTrustBundle: "", Arch: "amd64", - ClusterCIDR: []string{"10.132.0.0/14"}, + ClusterCIDR: []string{globalconfig.DefaultIPv4ClusterCIDR}, ControlPlaneAvailabilityPolicy: "HighlyAvailable", ImageContentSources: "", InfraID: "", @@ -30,11 +31,14 @@ func NewCreateCommands() *cobra.Command { PullSecretFile: "", ReleaseImage: "", Render: false, - ServiceCIDR: []string{"172.31.0.0/16"}, + ServiceCIDR: []string{globalconfig.DefaultIPv4ServiceCIDR}, + DefaultDual: false, Timeout: 0, Wait: false, PausedUntil: "", OLMCatalogPlacement: v1beta1.ManagementOLMCatalogPlacement, + OLMDisableDefaultSources: false, + NetworkType: string(v1beta1.OVNKubernetes), } cmd := &cobra.Command{ @@ -61,17 +65,21 @@ func NewCreateCommands() *cobra.Command { cmd.PersistentFlags().Int32Var(&opts.NodePoolReplicas, "node-pool-replicas", opts.NodePoolReplicas, "If set to 0 or greater, NodePools will be created with that many replicas. 
If set to less than 0, no NodePools will be created.") cmd.PersistentFlags().StringToStringVar(&opts.NodeSelector, "node-selector", opts.NodeSelector, "A comma separated list of key=value pairs to use as the node selector for the Hosted Control Plane pods to stick to. (e.g. role=cp,disk=fast)") cmd.PersistentFlags().Var(&opts.NodeUpgradeType, "node-upgrade-type", "The NodePool upgrade strategy for how nodes should behave when upgraded. Supported options: Replace, InPlace") - cmd.PersistentFlags().Var(&opts.OLMCatalogPlacement, "olmCatalogPlacement", "The OLM Catalog Placement for the HostedCluster. Supported options: Management, Guest") + cmd.PersistentFlags().Var(&opts.OLMCatalogPlacement, "olm-catalog-placement", "The OLM Catalog Placement for the HostedCluster. Supported options: Management, Guest") + cmd.PersistentFlags().BoolVar(&opts.OLMDisableDefaultSources, "olm-disable-default-sources", opts.OLMDisableDefaultSources, "Disables the OLM default catalog sources for the HostedCluster.") cmd.PersistentFlags().StringVar(&opts.NetworkType, "network-type", opts.NetworkType, "Enum specifying the cluster SDN provider. Supports either Calico, OVNKubernetes, OpenShiftSDN or Other.") cmd.PersistentFlags().StringVar(&opts.PullSecretFile, "pull-secret", opts.PullSecretFile, "Filepath to a pull secret.") cmd.PersistentFlags().StringVar(&opts.ReleaseImage, "release-image", opts.ReleaseImage, "The OCP release image for the HostedCluster.") cmd.PersistentFlags().BoolVar(&opts.Render, "render", opts.Render, "Renders the HostedCluster manifest output as YAML to stdout instead of automatically applying the manifests to the management cluster.") cmd.PersistentFlags().StringArrayVar(&opts.ServiceCIDR, "service-cidr", opts.ServiceCIDR, "The CIDR of the service network. Can be specified multiple times.") + cmd.PersistentFlags().BoolVar(&opts.DefaultDual, "default-dual", opts.DefaultDual, "Defines the Service and Cluster CIDRs as dual-stack default values. 
This flag is ignored if service-cidr or cluster-cidr are set. Cannot be defined with service-cidr or cluster-cidr flag.") cmd.PersistentFlags().StringVar(&opts.SSHKeyFile, "ssh-key", opts.SSHKeyFile, "Filepath to an SSH key file.") cmd.PersistentFlags().DurationVar(&opts.Timeout, "timeout", opts.Timeout, "If the --wait flag is set, set the optional timeout to limit the duration of the wait (Examples: 30s, 1h30m45s, etc.) 0 means no timeout.") cmd.PersistentFlags().BoolVar(&opts.Wait, "wait", opts.Wait, "If true, the create command will block until the HostedCluster is up. Requires at least one NodePool with at least one node.") cmd.PersistentFlags().StringVar(&opts.PausedUntil, "pausedUntil", opts.PausedUntil, "If a date is provided in RFC3339 format, HostedCluster creation is paused until that date. If the boolean true is provided, HostedCluster creation is paused until the field is removed.") + cmd.MarkFlagsMutuallyExclusive("service-cidr", "default-dual") + cmd.MarkFlagsMutuallyExclusive("cluster-cidr", "default-dual") cmd.AddCommand(agent.NewCreateCommand(opts)) cmd.AddCommand(aws.NewCreateCommand(opts)) cmd.AddCommand(kubevirt.NewCreateCommand(opts)) diff --git a/product-cli/cmd/cluster/kubevirt/create.go b/product-cli/cmd/cluster/kubevirt/create.go index 6335b8ecdf..c9098f5233 100644 --- a/product-cli/cmd/cluster/kubevirt/create.go +++ b/product-cli/cmd/cluster/kubevirt/create.go @@ -46,7 +46,7 @@ func NewCreateCommand(opts *core.CreateOptions) *cobra.Command { cmd.Flags().StringVar(&opts.KubevirtPlatform.NetworkInterfaceMultiQueue, "network-multiqueue", opts.KubevirtPlatform.NetworkInterfaceMultiQueue, `If "Enable", virtual network interfaces configured with a virtio bus will also enable the vhost multiqueue feature for network devices. 
supported values are "Enable" and "Disable"; default = "Disable"`) cmd.Flags().StringVar(&opts.KubevirtPlatform.QoSClass, "qos-class", opts.KubevirtPlatform.QoSClass, `If "Guaranteed", set the limit cpu and memory of the VirtualMachineInstance, to be the same as the requested cpu and memory; supported values: "Burstable" and "Guaranteed"`) cmd.Flags().StringArrayVar(&opts.KubevirtPlatform.AdditionalNetworks, "additional-network", opts.KubevirtPlatform.AdditionalNetworks, fmt.Sprintf(`Specify additional network that should be attached to the nodes, the "name" field should point to a multus network attachment definition with the format "[namespace]/[name]", it can be specified multiple times to attach to multiple networks. Supported parameters: %s, example: "name:ns1/nad-foo`, params.Supported(kubevirt.NetworkOpts{}))) - cmd.Flags().BoolVar(opts.KubevirtPlatform.AttachDefaultNetwork, "attach-default-network", *opts.KubevirtPlatform.AttachDefaultNetwork, `Specify if the default pod network should be attached to the nodes. This can only be set if --addtional-network is configured`) + cmd.Flags().StringToStringVar(&opts.KubevirtPlatform.VmNodeSelector, "vm-node-selector", opts.KubevirtPlatform.VmNodeSelector, "A comma separated list of key=value pairs to use as the node selector for the KubeVirt VirtualMachines to be scheduled onto. (e.g. 
role=kubevirt,size=large)") cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() diff --git a/product-cli/main.go b/product-cli/main.go index ceec321ffb..bf48d289a3 100644 --- a/product-cli/main.go +++ b/product-cli/main.go @@ -21,9 +21,9 @@ import ( "os/signal" "syscall" + cliversion "github.com/openshift/hypershift/cmd/version" "github.com/spf13/cobra" - "github.com/openshift/hypershift/pkg/version" "github.com/openshift/hypershift/product-cli/cmd/create" "github.com/openshift/hypershift/product-cli/cmd/destroy" ) @@ -40,14 +40,13 @@ func main() { }, } - cmd.Version = version.String() - ctx, cancel := context.WithCancel(context.Background()) defer cancel() cmd.AddCommand(create.NewCommand()) cmd.AddCommand(destroy.NewCommand()) + cmd.AddCommand(cliversion.NewVersionCommand()) sigs := make(chan os.Signal, 1) signal.Notify(sigs, syscall.SIGINT) diff --git a/support/awsutil/sg.go b/support/awsutil/sg.go index f56a5b3fea..e3b063c201 100644 --- a/support/awsutil/sg.go +++ b/support/awsutil/sg.go @@ -18,28 +18,11 @@ func DefaultWorkerSGEgressRules() []*ec2.IpPermission { } } -func DefaultWorkerSGIngressRules(vpcCIDRBlock, sgGroupID, sgUserID string) []*ec2.IpPermission { - return []*ec2.IpPermission{ - { - IpProtocol: aws.String("icmp"), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(vpcCIDRBlock), - }, - }, - FromPort: aws.Int64(-1), - ToPort: aws.Int64(-1), - }, - { - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(vpcCIDRBlock), - }, - }, - FromPort: aws.Int64(22), - ToPort: aws.Int64(22), - }, +// DefaultWorkerSGIngressRules templates out the required inbound security group rules for the default worker security +// group. This AWS security group is attached to worker node EC2 instances and the PrivateLink VPC Endpoint for the +// Hosted Control Plane. 
+func DefaultWorkerSGIngressRules(machineCIDRs []string, sgGroupID, sgUserID string) []*ec2.IpPermission { + inboundRules := []*ec2.IpPermission{ { FromPort: aws.Int64(4789), ToPort: aws.Int64(4789), @@ -139,49 +122,80 @@ func DefaultWorkerSGIngressRules(vpcCIDRBlock, sgGroupID, sgUserID string) []*ec }, }, }, - { - // This is for the private link endpoint. - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(vpcCIDRBlock), - }, - }, - FromPort: aws.Int64(6443), - ToPort: aws.Int64(6443), - }, - { - // This is for the private link endpoint. - IpProtocol: aws.String("tcp"), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(vpcCIDRBlock), - }, - }, - FromPort: aws.Int64(443), - ToPort: aws.Int64(443), - }, - { - // This is for the private link endpoint. - IpProtocol: aws.String("udp"), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(vpcCIDRBlock), - }, - }, - FromPort: aws.Int64(6443), - ToPort: aws.Int64(6443), - }, - { - // This is for the private link endpoint. - IpProtocol: aws.String("udp"), - IpRanges: []*ec2.IpRange{ - { - CidrIp: aws.String(vpcCIDRBlock), - }, - }, - FromPort: aws.Int64(443), - ToPort: aws.Int64(443), - }, } + + // Typically, only one machineCIDR is provided, however we handle many machineCIDRs because it is allowed by + // OpenShift. + for _, cidr := range machineCIDRs { + machineCIDRInboundRules := []*ec2.IpPermission{ + { + IpProtocol: aws.String("icmp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String(cidr), + }, + }, + FromPort: aws.Int64(-1), + ToPort: aws.Int64(-1), + }, + { + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String(cidr), + }, + }, + FromPort: aws.Int64(22), + ToPort: aws.Int64(22), + }, + { + // This is for the private link endpoint. + IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String(cidr), + }, + }, + FromPort: aws.Int64(6443), + ToPort: aws.Int64(6443), + }, + { + // This is for the private link endpoint. 
+ IpProtocol: aws.String("tcp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String(cidr), + }, + }, + FromPort: aws.Int64(443), + ToPort: aws.Int64(443), + }, + { + // This is for the private link endpoint. + IpProtocol: aws.String("udp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String(cidr), + }, + }, + FromPort: aws.Int64(6443), + ToPort: aws.Int64(6443), + }, + { + // This is for the private link endpoint. + IpProtocol: aws.String("udp"), + IpRanges: []*ec2.IpRange{ + { + CidrIp: aws.String(cidr), + }, + }, + FromPort: aws.Int64(443), + ToPort: aws.Int64(443), + }, + } + + inboundRules = append(inboundRules, machineCIDRInboundRules...) + } + + return inboundRules } diff --git a/support/capabilities/management_cluster_capabilities.go b/support/capabilities/management_cluster_capabilities.go index d4542fba7b..39577e84e2 100644 --- a/support/capabilities/management_cluster_capabilities.go +++ b/support/capabilities/management_cluster_capabilities.go @@ -17,6 +17,14 @@ type CapabiltyChecker interface { Has(capabilities ...CapabilityType) bool } +type MockCapabilityChecker struct { + MockHas func(capabilities ...CapabilityType) bool +} + +func (m *MockCapabilityChecker) Has(capabilities ...CapabilityType) bool { + return m.MockHas(capabilities...) +} + type CapabilityType int const ( diff --git a/support/certs/tls.go b/support/certs/tls.go index 5bdc38dd00..8a89a2e97b 100644 --- a/support/certs/tls.go +++ b/support/certs/tls.go @@ -5,7 +5,7 @@ import ( "crypto/md5" "crypto/rand" "crypto/rsa" - "crypto/sha1" + "crypto/sha512" "crypto/x509" "crypto/x509/pkix" "encoding/base64" @@ -14,6 +14,8 @@ import ( "math" "math/big" "net" + "os" + "strconv" "time" "github.com/google/go-cmp/cmp" @@ -45,6 +47,16 @@ const ( TLSSignerKeyMapKey = "tls.key" // UserCABundleMapKeyis the key value in a user-provided CA configMap. UserCABundleMapKey = "ca-bundle.crt" + // Custom certificate validity. 
The format of the annotation is a go duration string with a numeric component and unit. + // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h" + CertificateValidityAnnotation = "hypershift.openshift.io/certificate-validity" + CertificateValidityEnvVar = "CERTIFICATE_VALIDITY" + // Custom certificate renewal percentage. The format of the annotation is a float64 value between 0 and 1. + // The certificate will renew when less than CertificateRenewalEnvVar of its validity period remains. + // For example, if you set the validity period to 100 days and the renewal percentage to 0.30, + // the certificate will renew when there are fewer than 30 days remaining (100 days * 0.30 = 30 days) before it expires. + CertificateRenewalAnnotation = "hypershift.openshift.io/certificate-renewal" + CertificateRenewalEnvVar = "CERTIFICATE_RENEWAL_PERCENTAGE" ) // CertCfg contains all needed fields to configure a new certificate @@ -117,12 +129,14 @@ func SelfSignedCertificate(cfg *CertCfg, key *rsa.PrivateKey) (*x509.Certificate if err != nil { return nil, err } + + now := time.Now() cert := x509.Certificate{ BasicConstraintsValid: true, IsCA: cfg.IsCA, KeyUsage: cfg.KeyUsages, - NotAfter: time.Now().Add(cfg.Validity), - NotBefore: time.Now(), + NotAfter: now.Add(cfg.Validity), + NotBefore: now, SerialNumber: serial, Subject: cfg.Subject, } @@ -131,7 +145,7 @@ func SelfSignedCertificate(cfg *CertCfg, key *rsa.PrivateKey) (*x509.Certificate return nil, errors.Errorf("certificate subject is not set, or invalid") } - cert.SubjectKeyId, err = rsaPubKeySHA1Hash(&key.PublicKey) + cert.SubjectKeyId, err = rsaPubKeySHA512Hash(&key.PublicKey) if err != nil { return nil, errors.Wrap(err, "failed to set subject key identifier") } @@ -156,13 +170,14 @@ func signedCertificate( return nil, err } + now := time.Now() certTmpl := x509.Certificate{ DNSNames: csr.DNSNames, ExtKeyUsage: cfg.ExtKeyUsages, IPAddresses: csr.IPAddresses, KeyUsage: cfg.KeyUsages, - NotAfter: 
time.Now().Add(cfg.Validity), - NotBefore: caCert.NotBefore, + NotAfter: now.Add(cfg.Validity), + NotBefore: now, SerialNumber: serial, Subject: csr.Subject, IsCA: cfg.IsCA, @@ -170,7 +185,7 @@ func signedCertificate( BasicConstraintsValid: true, } - certTmpl.SubjectKeyId, err = rsaPubKeySHA1Hash(&key.PublicKey) + certTmpl.SubjectKeyId, err = rsaPubKeySHA512Hash(&key.PublicKey) if err != nil { return nil, errors.Wrap(err, "failed to set subject key identifier") } @@ -182,8 +197,8 @@ func signedCertificate( return x509.ParseCertificate(certBytes) } -func rsaPubKeySHA1Hash(pub *rsa.PublicKey) ([]byte, error) { - hash := sha1.New() +func rsaPubKeySHA512Hash(pub *rsa.PublicKey) ([]byte, error) { + hash := sha512.New() if _, err := hash.Write(pub.N.Bytes()); err != nil { return nil, err } @@ -286,6 +301,13 @@ func ValidateKeyPair(pemKey, pemCertificate []byte, cfg *CertCfg, minimumRemaini errs = append(errs, fmt.Errorf("actual dns names differ from expected: %s", dnsNamesDiff)) } + tolerance := float64(time.Second * 1) + certValidity := cert.NotAfter.Sub(cert.NotBefore) + expectedValidity := cfg.Validity + if diff := (certValidity - expectedValidity).Abs().Seconds(); diff >= float64(tolerance) { + errs = append(errs, fmt.Errorf("actual validity differs from expected with %v seconds", diff)) + } + extUsageDiff := cmp.Diff(cert.ExtKeyUsage, cfg.ExtKeyUsages, cmpopts.SortSlices(func(a, b x509.ExtKeyUsage) bool { return a < b })) if extUsageDiff != "" { errs = append(errs, fmt.Errorf("actual extended key usages differ from expected: %s", extUsageDiff)) @@ -356,15 +378,36 @@ func ReconcileSignedCert( secret.Data[caKey] = append([]byte(nil), ca.Data[opts.CASignerCertMapKey]...) 
} + certValidity := ValidityOneYear + + if value := os.Getenv(CertificateValidityEnvVar); value != "" { + customCertValidityEnvVar, err := time.ParseDuration(value) + if err != nil { + return fmt.Errorf("failed to parse custom certificate validity from env var %s: %w", CertificateValidityEnvVar, err) + } + certValidity = customCertValidityEnvVar + } + cfg := &CertCfg{ Subject: pkix.Name{CommonName: cn, Organization: org}, KeyUsages: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, ExtKeyUsages: extUsages, - Validity: ValidityOneYear, + Validity: certValidity, DNSNames: dnsNames, IPAddresses: ipAddresses, } - if err := ValidateKeyPair(secret.Data[keyKey], secret.Data[crtKey], cfg, 30*ValidityOneDay); err == nil { + + minimumRemainingValidity := 30 * ValidityOneDay + + if value := os.Getenv(CertificateRenewalEnvVar); value != "" { + renewalPercentage, err := strconv.ParseFloat(value, 64) + if err != nil { + return fmt.Errorf("failed to parse custom certificate renewal percentage from env var %s: %w", CertificateRenewalEnvVar, err) + } + minimumRemainingValidity = time.Duration(float64(certValidity) * renewalPercentage) + } + + if err := ValidateKeyPair(secret.Data[keyKey], secret.Data[crtKey], cfg, minimumRemainingValidity); err == nil { return nil } certBytes, keyBytes, _, err := signCertificate(cfg, ca, opts) diff --git a/support/config/cipher.go b/support/config/cipher.go index 503ef19ff6..06501ae59c 100644 --- a/support/config/cipher.go +++ b/support/config/cipher.go @@ -19,22 +19,12 @@ var openSSLToIANACiphersMap = map[string]string{ "ECDHE-RSA-AES256-GCM-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x30 "ECDHE-ECDSA-CHACHA20-POLY1305": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA9 "ECDHE-RSA-CHACHA20-POLY1305": "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA8 - "ECDHE-ECDSA-AES128-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x23 - "ECDHE-RSA-AES128-SHA256": 
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x27 - "AES128-GCM-SHA256": "TLS_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9C - "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D - "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C // TLS 1 "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13 "ECDHE-ECDSA-AES256-SHA": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", // 0xC0,0x0A "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 - - // SSL 3 - "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F - "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 - "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A } func MinTLSVersion(securityProfile *configv1.TLSSecurityProfile) string { diff --git a/support/config/constants.go b/support/config/constants.go index 23e81b9b6e..8108dd3e82 100644 --- a/support/config/constants.go +++ b/support/config/constants.go @@ -32,6 +32,7 @@ const ( KASSVCLBAzurePort = 7443 KASSVCPort = 6443 KASPodDefaultPort = 6443 + KASSVCIBMCloudPort = 2040 DefaultServiceNodePortRange = "30000-32767" DefaultSecurityContextUser = 1001 RecommendedLeaseDuration = "137s" diff --git a/support/config/deployment.go b/support/config/deployment.go index 7508305613..3315175472 100644 --- a/support/config/deployment.go +++ b/support/config/deployment.go @@ -45,6 +45,9 @@ type DeploymentConfig struct { DebugDeployments sets.String ResourceRequestOverrides ResourceOverrides IsolateAsRequestServing bool + RevisionHistoryLimit int + + AdditionalRequestServingNodeSelector map[string]string } func (c *DeploymentConfig) SetContainerResourcesIfPresent(container *corev1.Container) { @@ -92,6 +95,9 @@ func (c *DeploymentConfig) ApplyTo(deployment *appsv1.Deployment) { deployment.Spec.Strategy.RollingUpdate.MaxUnavailable = &maxUnavailable } + // set revision history limit + 
deployment.Spec.RevisionHistoryLimit = pointer.Int32(int32(c.RevisionHistoryLimit)) + // set default security context for pod if c.SetDefaultSecurityContext { deployment.Spec.Template.Spec.SecurityContext = &corev1.PodSecurityContext{ @@ -171,22 +177,42 @@ func (c *DeploymentConfig) setMultizoneSpread(labels map[string]string) { if labels == nil { return } + if c.Scheduling.Affinity == nil { + c.Scheduling.Affinity = &corev1.Affinity{} + } + if c.Scheduling.Affinity.PodAntiAffinity == nil { + c.Scheduling.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{} + } + c.Scheduling.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(c.Scheduling.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, + corev1.PodAffinityTerm{ + TopologyKey: corev1.LabelTopologyZone, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + }, + ) +} +// setNodeSpread sets PodAntiAffinity with corev1.LabelHostname as the topology key for a given set of labels. +// This is useful to e.g ensure pods are spread across nodes. 
+func (c *DeploymentConfig) setNodeSpread(labels map[string]string) { + if labels == nil { + return + } if c.Scheduling.Affinity == nil { c.Scheduling.Affinity = &corev1.Affinity{} } if c.Scheduling.Affinity.PodAntiAffinity == nil { c.Scheduling.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{} } - c.Scheduling.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = - []corev1.PodAffinityTerm{ - { - TopologyKey: corev1.LabelTopologyZone, - LabelSelector: &metav1.LabelSelector{ - MatchLabels: labels, - }, + c.Scheduling.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(c.Scheduling.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, + corev1.PodAffinityTerm{ + TopologyKey: corev1.LabelHostname, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: labels, }, - } + }, + ) } // setColocation sets labels and PodAffinity rules for this deployment so that pods @@ -276,21 +302,29 @@ func (c *DeploymentConfig) setControlPlaneIsolation(hcp *hyperv1.HostedControlPl } if c.IsolateAsRequestServing { + nodeSelectorRequirements := []corev1.NodeSelectorRequirement{ + { + Key: hyperv1.RequestServingComponentLabel, + Operator: corev1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + { + Key: hyperv1.HostedClusterLabel, + Operator: corev1.NodeSelectorOpIn, + Values: []string{clusterKey(hcp)}, + }, + } + for key, value := range c.AdditionalRequestServingNodeSelector { + nodeSelectorRequirements = append(nodeSelectorRequirements, corev1.NodeSelectorRequirement{ + Key: key, + Operator: corev1.NodeSelectorOpIn, + Values: []string{value}, + }) + } c.Scheduling.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &corev1.NodeSelector{ NodeSelectorTerms: []corev1.NodeSelectorTerm{ { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: hyperv1.RequestServingComponentLabel, - Operator: corev1.NodeSelectorOpIn, - Values: []string{"true"}, - }, - { - Key: hyperv1.HostedClusterLabel, - 
Operator: corev1.NodeSelectorOpIn, - Values: []string{clusterKey(hcp)}, - }, - }, + MatchExpressions: nodeSelectorRequirements, }, }, } @@ -314,6 +348,7 @@ func (c *DeploymentConfig) setLocation(hcp *hyperv1.HostedControlPlane, multiZon // TODO (alberto): pass labels with deployment hash and set this unconditionally so we don't skew setup. if c.Replicas > 1 { c.setMultizoneSpread(multiZoneSpreadLabels) + c.setNodeSpread(multiZoneSpreadLabels) } } @@ -336,6 +371,9 @@ func (c *DeploymentConfig) SetRequestServingDefaults(hcp *hyperv1.HostedControlP if hcp.Annotations[hyperv1.TopologyAnnotation] == hyperv1.DedicatedRequestServingComponentsTopology { c.IsolateAsRequestServing = true } + if hcp.Annotations[hyperv1.RequestServingNodeAdditionalSelectorAnnotation] != "" { + c.AdditionalRequestServingNodeSelector = util.ParseNodeSelector(hcp.Annotations[hyperv1.RequestServingNodeAdditionalSelectorAnnotation]) + } c.SetDefaults(hcp, multiZoneSpreadLabels, replicas) if c.AdditionalLabels == nil { c.AdditionalLabels = map[string]string{} @@ -353,8 +391,8 @@ func (c *DeploymentConfig) SetDefaults(hcp *hyperv1.HostedControlPlane, multiZon c.Replicas = *replicas } c.DebugDeployments = debugDeployments(hcp) - c.ResourceRequestOverrides = resourceRequestOverrides(hcp) + c.RevisionHistoryLimit = 2 c.setLocation(hcp, multiZoneSpreadLabels) // TODO (alberto): make this private, atm is needed for the konnectivity agent daemonset. 
diff --git a/support/config/deployment_test.go b/support/config/deployment_test.go index c2cdd3a5f4..cabb219a81 100644 --- a/support/config/deployment_test.go +++ b/support/config/deployment_test.go @@ -282,6 +282,12 @@ func TestSetLocation(t *testing.T) { MatchLabels: labels, }, }, + { + TopologyKey: corev1.LabelHostname, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + }, }, } g.Expect(expected.Scheduling.Affinity.PodAntiAffinity).To(BeEquivalentTo(cfg.Scheduling.Affinity.PodAntiAffinity)) diff --git a/support/globalconfig/apiserver.go b/support/globalconfig/apiserver.go new file mode 100644 index 0000000000..24cf86d0bf --- /dev/null +++ b/support/globalconfig/apiserver.go @@ -0,0 +1,23 @@ +package globalconfig + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + configv1 "github.com/openshift/api/config/v1" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +func APIServerConfiguration() *configv1.APIServer { + return &configv1.APIServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster", + }, + } +} + +func ReconcileAPIServerConfiguration(APIServer *configv1.APIServer, config *hyperv1.ClusterConfiguration) error { + if config != nil && config.APIServer != nil { + APIServer.Spec = *config.APIServer + } + return nil +} diff --git a/support/globalconfig/cloudcreds.go b/support/globalconfig/cloudcreds.go deleted file mode 100644 index 8980ac9b7a..0000000000 --- a/support/globalconfig/cloudcreds.go +++ /dev/null @@ -1,23 +0,0 @@ -package globalconfig - -import ( - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func CloudCredentialsConfiguration() *operatorv1.CloudCredential { - return &operatorv1.CloudCredential{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cluster", - }, - } -} - -func ReconcileCloudCredentialsConfiguration(cfg *operatorv1.CloudCredential) error { - cfg.Spec.CredentialsMode = operatorv1.CloudCredentialsModeManual - - // Because we don't run 
the CCO, setting the management state to unmanaged. - // This should change if/when we run the CCO on the control plane side. - cfg.Spec.ManagementState = operatorv1.Unmanaged - return nil -} diff --git a/support/globalconfig/image.go b/support/globalconfig/image.go index 2ac52051dc..7536d870ef 100644 --- a/support/globalconfig/image.go +++ b/support/globalconfig/image.go @@ -1,10 +1,10 @@ package globalconfig import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" configv1 "github.com/openshift/api/config/v1" + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" ) func ImageConfig() *configv1.Image { @@ -15,11 +15,14 @@ func ImageConfig() *configv1.Image { } } -func ObservedImageConfig(ns string) *corev1.ConfigMap { - return &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "observed-config-image", - Namespace: ns, - }, +func ReconcileImageConfig(cfg *configv1.Image, hcp *hyperv1.HostedControlPlane) { + if hcp.Spec.Configuration != nil && hcp.Spec.Configuration.Image != nil { + cfg.Spec = *hcp.Spec.Configuration.Image + } +} + +func ReconcileImageConfigFromHostedCluster(cfg *configv1.Image, hc *hyperv1.HostedCluster) { + if hc.Spec.Configuration != nil && hc.Spec.Configuration.Image != nil { + cfg.Spec = *hc.Spec.Configuration.Image } } diff --git a/support/globalconfig/imagecontentsource.go b/support/globalconfig/imagecontentsource.go index 56627a3024..364fa71cd6 100644 --- a/support/globalconfig/imagecontentsource.go +++ b/support/globalconfig/imagecontentsource.go @@ -2,15 +2,20 @@ package globalconfig import ( "context" + "fmt" configv1 "github.com/openshift/api/config/v1" operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/support/capabilities" + "github.com/openshift/hypershift/support/releaseinfo" + hyperutil "github.com/openshift/hypershift/support/util" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + crclient "sigs.k8s.io/controller-runtime/pkg/client" ) func ImageContentSourcePolicy() *operatorv1alpha1.ImageContentSourcePolicy { @@ -102,23 +107,28 @@ func ReconcileImageDigestMirrors(idms *configv1.ImageDigestMirrorSet, hcp *hyper // // https://issues.redhat.com/browse/OCPNODE-1258 // https://github.com/openshift/hypershift/pull/1776 -func GetAllImageRegistryMirrors(ctx context.Context, client client.Client, mgmtClusterHasIDMSCapability bool) (map[string][]string, error) { +func GetAllImageRegistryMirrors(ctx context.Context, client client.Client, mgmtClusterHasIDMSCapability, mgmtClusterHasICSPCapability bool) (map[string][]string, error) { var mgmtClusterRegistryOverrides = make(map[string][]string) - var err, err2 error - // First, try to find any IDMS CRs in the management cluster if mgmtClusterHasIDMSCapability { - mgmtClusterRegistryOverrides, err = getImageDigestMirrorSets(ctx, client) + idms, err := getImageDigestMirrorSets(ctx, client) if err != nil { return nil, err } + + for key, values := range idms { + mgmtClusterRegistryOverrides[key] = append(mgmtClusterRegistryOverrides[key], values...) + } } - // Next, if no IDMS CRs were found, look for ICSP CRs - if len(mgmtClusterRegistryOverrides) == 0 { - mgmtClusterRegistryOverrides, err2 = getImageContentSourcePolicies(ctx, client) - if err2 != nil { - return nil, err2 + if mgmtClusterHasICSPCapability { + icsp, err := getImageContentSourcePolicies(ctx, client) + if err != nil { + return nil, err + } + + for key, values := range icsp { + mgmtClusterRegistryOverrides[key] = append(mgmtClusterRegistryOverrides[key], values...) 
} } @@ -178,3 +188,34 @@ func getImageContentSourcePolicies(ctx context.Context, client client.Client) (m return icspRegistryOverrides, nil } + +func RenconcileMgmtImageRegistryOverrides(ctx context.Context, capChecker capabilities.CapabiltyChecker, client crclient.Client, registryOverrides map[string]string) (releaseinfo.ProviderWithOpenShiftImageRegistryOverrides, hyperutil.ImageMetadataProvider, error) { + var ( + imageRegistryMirrors map[string][]string + err error + ) + + if capChecker.Has(capabilities.CapabilityICSP) || capChecker.Has(capabilities.CapabilityIDMS) { + imageRegistryMirrors, err = GetAllImageRegistryMirrors(ctx, client, capChecker.Has(capabilities.CapabilityIDMS), capChecker.Has(capabilities.CapabilityICSP)) + if err != nil { + return nil, nil, fmt.Errorf("failed to reconcile over image registry mirrors: %w", err) + } + } + + releaseProvider := &releaseinfo.ProviderWithOpenShiftImageRegistryOverridesDecorator{ + Delegate: &releaseinfo.RegistryMirrorProviderDecorator{ + Delegate: &releaseinfo.CachedProvider{ + Inner: &releaseinfo.RegistryClientProvider{}, + Cache: map[string]*releaseinfo.ReleaseImage{}, + }, + RegistryOverrides: registryOverrides, + }, + OpenShiftImageRegistryOverrides: imageRegistryMirrors, + } + + imageMetadataProvider := &hyperutil.RegistryClientImageMetadataProvider{ + OpenShiftImageRegistryOverrides: imageRegistryMirrors, + } + + return releaseProvider, imageMetadataProvider, nil +} diff --git a/support/globalconfig/imagecontentsource_test.go b/support/globalconfig/imagecontentsource_test.go new file mode 100644 index 0000000000..cb2d1bb46d --- /dev/null +++ b/support/globalconfig/imagecontentsource_test.go @@ -0,0 +1,332 @@ +package globalconfig + +import ( + "context" + "testing" + + configv1 "github.com/openshift/api/config/v1" + operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + "github.com/openshift/hypershift/support/api" + "github.com/openshift/hypershift/support/releaseinfo" + hyperutil 
"github.com/openshift/hypershift/support/util" + "k8s.io/apimachinery/pkg/runtime" + + . "github.com/onsi/gomega" + "github.com/openshift/hypershift/support/capabilities" + "golang.org/x/exp/slices" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestGetAllImageRegistryMirrors(t *testing.T) { + ctx := context.TODO() + g := NewGomegaWithT(t) + testsCases := []struct { + name string + icsp *operatorv1alpha1.ImageContentSourcePolicyList + idms *configv1.ImageDigestMirrorSetList + expectedResult map[string][]string + hasICSPCapability bool + hasIDMSCapability bool + }{ + { + name: "validate ImageRegistryMirrors with only ICSP", + icsp: createFakeICSP(), + expectedResult: map[string][]string{ + "registry1": {"mirror1", "mirror2"}, + "registry2": {"mirror1", "mirror2"}, + "registry3.sample.com/samplens/sampleimage@sha256:123456": { + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + }, + }, + hasICSPCapability: true, + hasIDMSCapability: false, + }, + { + name: "validate ImageRegistryMirrors with only IDMS", + idms: createFakeIDMS(), + expectedResult: map[string][]string{ + "registry1.sample.com/samplens/sampleimage@sha256:123456": {"mirror1.sample.com/samplens/sampleimage@sha256:123456", "mirror1.sample.com/samplens/sampleimage@sha256:123456"}, + "registry2.sample.com/samplens/sampleimage@sha256:123456": {"mirror2.sample.com/samplens/sampleimage@sha256:123456", "mirror2.sample.com/samplens/sampleimage@sha256:123456"}, + "registry3.sample.com/samplens/sampleimage@sha256:123456": {"mirror3.sample.com/samplens/sampleimage@sha256:123456", "mirror3.sample.com/samplens/sampleimage@sha256:123456"}, + }, + hasICSPCapability: false, + hasIDMSCapability: true, + }, + { + name: "validate ImageRegistryMirrors with ICSP and IDMS", + idms: createFakeIDMS(), + icsp: createFakeICSP(), + expectedResult: map[string][]string{ + 
"registry1.sample.com/samplens/sampleimage@sha256:123456": {"mirror1.sample.com/samplens/sampleimage@sha256:123456", "mirror1.sample.com/samplens/sampleimage@sha256:123456"}, + "registry2.sample.com/samplens/sampleimage@sha256:123456": {"mirror2.sample.com/samplens/sampleimage@sha256:123456", "mirror2.sample.com/samplens/sampleimage@sha256:123456"}, + "registry1": {"mirror1", "mirror2"}, + "registry2": {"mirror1", "mirror2"}, + "registry3.sample.com/samplens/sampleimage@sha256:123456": { + "mirror3.sample.com/samplens/sampleimage@sha256:123456", + "mirror3.sample.com/samplens/sampleimage@sha256:123456", + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + }, + }, + hasICSPCapability: true, + hasIDMSCapability: true, + }, + { + name: "validate empty ImageRegistryMirrors", + idms: nil, + icsp: nil, + expectedResult: map[string][]string{}, + hasICSPCapability: true, + hasIDMSCapability: true, + }, + } + + for _, tc := range testsCases { + t.Run(tc.name, func(t *testing.T) { + var objs []client.Object + + testScheme := runtime.NewScheme() + _ = operatorv1alpha1.AddToScheme(testScheme) + _ = configv1.AddToScheme(testScheme) + + if tc.idms != nil { + idmsObjs := make([]client.Object, len(tc.idms.Items)) + for i, idms := range tc.idms.Items { + idmsObjs[i] = &idms + } + objs = append(objs, idmsObjs...) + } + + if tc.icsp != nil { + icspObjs := make([]client.Object, len(tc.icsp.Items)) + for i, icsp := range tc.icsp.Items { + icspObjs[i] = &icsp + } + objs = append(objs, icspObjs...) 
+ } + + client := fake.NewClientBuilder().WithScheme(testScheme).WithObjects(objs...).Build() + + result, err := GetAllImageRegistryMirrors(ctx, client, tc.hasIDMSCapability, tc.hasICSPCapability) + g.Expect(err).To(BeNil()) + g.Expect(result).To(Equal(tc.expectedResult)) + }) + + } +} + +func createFakeICSP() *operatorv1alpha1.ImageContentSourcePolicyList { + return &operatorv1alpha1.ImageContentSourcePolicyList{ + Items: []operatorv1alpha1.ImageContentSourcePolicy{ + { + Spec: operatorv1alpha1.ImageContentSourcePolicySpec{ + RepositoryDigestMirrors: []operatorv1alpha1.RepositoryDigestMirrors{ + { + Source: "registry1", + Mirrors: []string{"mirror1", "mirror2"}, + }, + { + Source: "registry2", + Mirrors: []string{"mirror1", "mirror2"}, + }, + { + Source: "registry3.sample.com/samplens/sampleimage@sha256:123456", + Mirrors: []string{ + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + }, + }, + }, + }, + }, + }, + } +} + +func createFakeIDMS() *configv1.ImageDigestMirrorSetList { + return &configv1.ImageDigestMirrorSetList{ + Items: []configv1.ImageDigestMirrorSet{ + { + Spec: configv1.ImageDigestMirrorSetSpec{ + ImageDigestMirrors: []configv1.ImageDigestMirrors{ + { + Source: "registry1.sample.com/samplens/sampleimage@sha256:123456", + Mirrors: []configv1.ImageMirror{ + "mirror1.sample.com/samplens/sampleimage@sha256:123456", + "mirror1.sample.com/samplens/sampleimage@sha256:123456", + }, + }, + { + Source: "registry2.sample.com/samplens/sampleimage@sha256:123456", + Mirrors: []configv1.ImageMirror{ + "mirror2.sample.com/samplens/sampleimage@sha256:123456", + "mirror2.sample.com/samplens/sampleimage@sha256:123456", + }, + }, + { + Source: "registry3.sample.com/samplens/sampleimage@sha256:123456", + Mirrors: []configv1.ImageMirror{ + "mirror3.sample.com/samplens/sampleimage@sha256:123456", + "mirror3.sample.com/samplens/sampleimage@sha256:123456", + }, + }, + }, + }, + }, + }, + } +} +func 
TestReconcileMgmtImageRegistryOverrides(t *testing.T) { + ctx := context.TODO() + g := NewGomegaWithT(t) + + // Define test cases + testCases := []struct { + name string + capChecker capabilities.CapabiltyChecker + registryOverrides map[string]string + idms *configv1.ImageDigestMirrorSetList + icsp *operatorv1alpha1.ImageContentSourcePolicyList + expectedRelease *releaseinfo.ProviderWithOpenShiftImageRegistryOverridesDecorator + expectedMetadata *hyperutil.RegistryClientImageMetadataProvider + expectedError error + expectedErrorMatch string + }{ + { + name: "Test with ICSP and IDMS capabilities", + capChecker: &capabilities.MockCapabilityChecker{ + MockHas: func(capability ...capabilities.CapabilityType) bool { + return slices.Contains(capability, capabilities.CapabilityICSP) || slices.Contains(capability, capabilities.CapabilityIDMS) + }, + }, + registryOverrides: map[string]string{ + "registry1": "override1", + "registry2": "override2", + }, + idms: createFakeIDMS(), + icsp: createFakeICSP(), + expectedRelease: &releaseinfo.ProviderWithOpenShiftImageRegistryOverridesDecorator{ + Delegate: &releaseinfo.RegistryMirrorProviderDecorator{ + Delegate: &releaseinfo.CachedProvider{ + Inner: &releaseinfo.RegistryClientProvider{}, + Cache: map[string]*releaseinfo.ReleaseImage{}, + }, + RegistryOverrides: map[string]string{ + "registry1": "override1", + "registry2": "override2", + }, + }, + OpenShiftImageRegistryOverrides: map[string][]string{ + "registry1": {"mirror1", "mirror2"}, + "registry2": {"mirror1", "mirror2"}, + "registry1.sample.com/samplens/sampleimage@sha256:123456": { + "mirror1.sample.com/samplens/sampleimage@sha256:123456", + "mirror1.sample.com/samplens/sampleimage@sha256:123456", + }, + "registry2.sample.com/samplens/sampleimage@sha256:123456": { + "mirror2.sample.com/samplens/sampleimage@sha256:123456", + "mirror2.sample.com/samplens/sampleimage@sha256:123456", + }, + "registry3.sample.com/samplens/sampleimage@sha256:123456": { + 
"mirror3.sample.com/samplens/sampleimage@sha256:123456", + "mirror3.sample.com/samplens/sampleimage@sha256:123456", + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + }, + }, + }, + expectedMetadata: &hyperutil.RegistryClientImageMetadataProvider{ + OpenShiftImageRegistryOverrides: map[string][]string{ + "registry1": {"mirror1", "mirror2"}, + "registry2": {"mirror1", "mirror2"}, + "registry1.sample.com/samplens/sampleimage@sha256:123456": { + "mirror1.sample.com/samplens/sampleimage@sha256:123456", + "mirror1.sample.com/samplens/sampleimage@sha256:123456", + }, + "registry2.sample.com/samplens/sampleimage@sha256:123456": { + "mirror2.sample.com/samplens/sampleimage@sha256:123456", + "mirror2.sample.com/samplens/sampleimage@sha256:123456", + }, + "registry3.sample.com/samplens/sampleimage@sha256:123456": { + "mirror3.sample.com/samplens/sampleimage@sha256:123456", + "mirror3.sample.com/samplens/sampleimage@sha256:123456", + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + "mirroricsp3.sample.com/samplens/sampleimage@sha256:123456", + }, + }, + }, + expectedError: nil, + }, + { + name: "Test without ICSP and IDMS capabilities", + capChecker: &capabilities.MockCapabilityChecker{ + MockHas: func(capability ...capabilities.CapabilityType) bool { + return false + }, + }, + registryOverrides: map[string]string{ + "registry1": "override1", + "registry2": "override2", + }, + expectedRelease: &releaseinfo.ProviderWithOpenShiftImageRegistryOverridesDecorator{ + Delegate: &releaseinfo.RegistryMirrorProviderDecorator{ + Delegate: &releaseinfo.CachedProvider{ + Inner: &releaseinfo.RegistryClientProvider{}, + Cache: map[string]*releaseinfo.ReleaseImage{}, + }, + RegistryOverrides: map[string]string{ + "registry1": "override1", + "registry2": "override2", + }, + }, + OpenShiftImageRegistryOverrides: nil, + }, + expectedMetadata: &hyperutil.RegistryClientImageMetadataProvider{ + 
OpenShiftImageRegistryOverrides: nil, + }, + expectedError: nil, + }, + } + + // Run test cases + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var objs []client.Object + + if tc.idms != nil { + idmsObjs := make([]client.Object, len(tc.idms.Items)) + for i, idms := range tc.idms.Items { + idmsObjs[i] = &idms + } + objs = append(objs, idmsObjs...) + } + + if tc.icsp != nil { + icspObjs := make([]client.Object, len(tc.icsp.Items)) + for i, icsp := range tc.icsp.Items { + icspObjs[i] = &icsp + } + objs = append(objs, icspObjs...) + } + client := fake.NewClientBuilder().WithScheme(api.Scheme).WithObjects(objs...).Build() + + releaseProvider, imageMetadataProvider, err := RenconcileMgmtImageRegistryOverrides(ctx, tc.capChecker, client, tc.registryOverrides) + + // Check error + if tc.expectedError != nil { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(tc.expectedErrorMatch)) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + // Check release provider + g.Expect(releaseProvider).To(Equal(tc.expectedRelease)) + + // Check image metadata provider + g.Expect(imageMetadataProvider).To(Equal(tc.expectedMetadata)) + }) + } +} diff --git a/support/globalconfig/network.go b/support/globalconfig/network.go index 947736ab81..5c034f228a 100644 --- a/support/globalconfig/network.go +++ b/support/globalconfig/network.go @@ -11,8 +11,12 @@ import ( ) const ( - defaultIPv4HostPrefix = 23 - defaultIPv6HostPrefix = 64 + defaultIPv4HostPrefix = 23 + defaultIPv6HostPrefix = 64 + DefaultIPv4ServiceCIDR = "172.31.0.0/16" + DefaultIPv6ServiceCIDR = "fd02::/112" + DefaultIPv4ClusterCIDR = "10.132.0.0/14" + DefaultIPv6ClusterCIDR = "fd01::/48" ) func NetworkConfig() *configv1.Network { diff --git a/support/globalconfig/observed.go b/support/globalconfig/observed.go index 6c2bebd304..c36fed34ec 100644 --- a/support/globalconfig/observed.go +++ b/support/globalconfig/observed.go @@ -20,7 +20,6 @@ const ( ) type ObservedConfig struct 
{ - Image *configv1.Image Build *configv1.Build Project *configv1.Project } @@ -52,10 +51,6 @@ func ReadObservedConfig(ctx context.Context, c client.Client, observedConfig *Ob observed *corev1.ConfigMap dest runtime.Object }{ - "image": { - observed: ObservedImageConfig(namespace), - dest: ImageConfig(), - }, "project": { observed: ObservedProjectConfig(namespace), dest: ProjectConfig(), @@ -81,7 +76,6 @@ func ReadObservedConfig(ctx context.Context, c client.Client, observedConfig *Ob } } - observedConfig.Image = configs["image"].dest.(*configv1.Image) observedConfig.Build = configs["build"].dest.(*configv1.Build) observedConfig.Project = configs["project"].dest.(*configv1.Project) diff --git a/support/konnectivityproxy/dialer.go b/support/konnectivityproxy/dialer.go new file mode 100644 index 0000000000..0a4fc3d594 --- /dev/null +++ b/support/konnectivityproxy/dialer.go @@ -0,0 +1,409 @@ +package konnectivityproxy + +import ( + "bufio" + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "github.com/armon/go-socks5" + "github.com/go-logr/logr" + "golang.org/x/net/proxy" + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// The ProxyDialer is the dialer used to connect via a Konnectivity proxy +// It implements the ContextDialer and Dialer interfaces as well as a +// the socks5.NameResolver interface to look up names through the konnectivity +// tunnel if necessary. +type ProxyDialer interface { + proxy.ContextDialer + proxy.Dialer + socks5.NameResolver + IsCloudAPI(string) bool +} + +// Options specifies the inputs for creating a Konnectivity dialer. +type Options struct { + // CAFile or CABytes specifies the CA bundle that should be used to verify + // connections to the Konnectivity server. One or the other can be specified, + // not both. REQUIRED. 
+ CAFile string + CABytes []byte + + // ClientCertFile or ClientCertBytes specifies the client certificate to be used + // to authenticate to the Konnectivity server (via mTLS). One or the other can + // be specified, not both. REQUIRED. + ClientCertFile string + ClientCertBytes []byte + + // ClientKeyFile or ClientKeyBytes specifies the client key to be used to + // authenticate to the Konnectivity server (via mTLS). One or the other can be + // specified, not both. REQUIRED. + ClientKeyFile string + ClientKeyBytes []byte + + // KonnectivityHost is the host name of the Konnectivity server proxy. REQUIRED. + KonnectivityHost string + + // KonnectivityPort is the port of the Konnectivity server proxy. REQUIRED. + KonnectivityPort uint32 + + // ConnectDirectlyToCloudAPIs specifies whether cloud APIs should be bypassed + // by the proxy. This is used by the ingress operator to be able to create DNS records + // before worker nodes are present in the cluster. + // See https://github.com/openshift/hypershift/pull/1601 + ConnectDirectlyToCloudAPIs bool + + // ExcludeCloudAPIHosts is a list of hostnames to exclude when determining if a particular + // hostname is a CloudAPI hostname. + // This is needed in the case when we use an internal proxy whose hostname ends in + // one of the cloud API suffixes we check. We should not need to use the management cluster + // proxy to get to the endpoint. + ExcludeCloudAPIHosts []string + + // ResolveFromManagementClusterDNS tells the dialer to fallback to the management + // cluster's DNS (and direct dialer) initially until the konnectivity tunnel is available. + // Once the konnectivity tunnel is available, it no longer falls back on the management + // cluster. This is used by the OAuth server to allow quicker initialization of identity + // providers while worker nodes have not joined. 
+ // See https://github.com/openshift/hypershift/pull/2261 + ResolveFromManagementClusterDNS bool + + // ResolveFromGuestClusterDNS tells the dialer to resolve names using the guest + // cluster's coreDNS service. Used by oauth and ingress operator. + ResolveFromGuestClusterDNS bool + + // ResolveBeforeDial tells the dialer to resolve names before creating a TCP connection + // through the Konnectivity server. This is needed by the HTTPS konnectivity proxy since the + // hostname to be proxied needs to be resolved before being sent to the user's proxy. + ResolveBeforeDial bool + + // DisableResolver disables any name resolution by the resolver. This is used by the CNO. + // See https://github.com/openshift/hypershift/pull/3986 + DisableResolver bool + + // Client for the hosted cluster. This is used by the resolver to resolve names either via + // service name or via coredns. REQUIRED (unless DisableResolver is specified) + Client client.Client + + // Log is the logger to use for the dialer. No log output is generated if not specified. 
+ Log logr.Logger +} + +func (o *Options) Validate() error { + var errs []error + if len(o.CAFile) > 0 && len(o.CABytes) > 0 { + errs = append(errs, fmt.Errorf("cannot specify both CAFile and CABytes")) + } + if len(o.CAFile) == 0 && len(o.CABytes) == 0 { + errs = append(errs, fmt.Errorf("CAFile or CABytes is required")) + } + if len(o.ClientCertFile) > 0 && len(o.ClientCertBytes) > 0 { + errs = append(errs, fmt.Errorf("cannot specify both ClientCertFile and ClientCertBytes")) + } + if len(o.ClientCertFile) == 0 && len(o.ClientCertBytes) == 0 { + errs = append(errs, fmt.Errorf("ClientCertFile or ClientCertBytes is required")) + } + if len(o.ClientKeyFile) > 0 && len(o.ClientKeyBytes) > 0 { + errs = append(errs, fmt.Errorf("cannot specify both ClientKeyFile and ClientKeyBytes")) + } + if len(o.ClientKeyFile) == 0 && len(o.ClientKeyBytes) == 0 { + errs = append(errs, fmt.Errorf("ClientKeyFile or ClientKeyBytes is required")) + } + + if len(o.KonnectivityHost) == 0 { + errs = append(errs, fmt.Errorf("KonnectivityHost is required")) + } + if o.KonnectivityPort == 0 { + errs = append(errs, fmt.Errorf("KonnectivityPort is required")) + } + + if !o.DisableResolver && o.Client == nil { + errs = append(errs, fmt.Errorf("client is required when resolving names")) + } + + return errors.NewAggregate(errs) +} + +func readFileOrBytes(name string, b []byte) ([]byte, error) { + if len(b) > 0 { + return b, nil + } + result, err := os.ReadFile(name) + if err != nil { + return nil, fmt.Errorf("failed to read %s: %w", name, err) + } + return result, nil +} + +// NewKonnectivityDialer creates a dialer that uses a konnectivity server as a +// tunnel to obtain a TCP connection to the target address. The dialer also includes +// a resolver that optionally uses the same konnectivity server to resolve names +// via the CoreDNS service in a hosted cluster. 
+func NewKonnectivityDialer(opts Options) (ProxyDialer, error) { + if err := opts.Validate(); err != nil { + return nil, fmt.Errorf("failed validation: %w", err) + } + + var caBytes, clientCertBytes, clientKeyBytes []byte + var err error + + caBytes, err = readFileOrBytes(opts.CAFile, opts.CABytes) + if err != nil { + return nil, err + } + clientCertBytes, err = readFileOrBytes(opts.ClientCertFile, opts.ClientCertBytes) + if err != nil { + return nil, err + } + clientKeyBytes, err = readFileOrBytes(opts.ClientKeyFile, opts.ClientKeyBytes) + if err != nil { + return nil, err + } + + proxy := &konnectivityProxy{ + ca: caBytes, + clientCert: clientCertBytes, + clientKey: clientKeyBytes, + konnectivityHost: opts.KonnectivityHost, + konnectivityPort: opts.KonnectivityPort, + connectDirectlyToCloudAPIs: opts.ConnectDirectlyToCloudAPIs, + resolveFromManagementClusterDNS: opts.ResolveFromManagementClusterDNS, + resolveBeforeDial: opts.ResolveBeforeDial, + excludeCloudHosts: sets.New(opts.ExcludeCloudAPIHosts...), + } + proxy.proxyResolver = proxyResolver{ + client: opts.Client, + disableResolver: opts.DisableResolver, + resolveFromGuestCluster: opts.ResolveFromGuestClusterDNS, + resolveFromManagementCluster: opts.ResolveFromManagementClusterDNS, + mustResolve: opts.ResolveBeforeDial, + dnsFallback: &proxy.fallbackToMCDNS, + log: opts.Log, + isCloudAPI: proxy.IsCloudAPI, + } + proxy.proxyResolver.guestClusterResolver = &guestClusterResolver{ + client: opts.Client, + konnectivityDialFunc: proxy.DialContext, + log: opts.Log, + } + return proxy, nil +} + +// konnectivityProxy is the implementation of the ProxyDialer interface +type konnectivityProxy struct { + ca []byte + clientCert []byte + clientKey []byte + konnectivityHost string + konnectivityPort uint32 + connectDirectlyToCloudAPIs bool + resolveFromManagementClusterDNS bool + resolveBeforeDial bool + + proxyResolver + + // fallbackToMCDNS is a synced boolean that keeps track + // of whether to fallback to the management 
cluster's DNS + // (and dial directly). + // It is initially false, but if lookup through the guest + // fails, then it's set to true. + fallbackToMCDNS syncBool + + tlsConfigOnce sync.Once + tlsConfig *tls.Config + + httpDialerOnce sync.Once + httpDialer proxy.Dialer + + excludeCloudHosts sets.Set[string] +} + +func (p *konnectivityProxy) Dial(network, address string) (net.Conn, error) { + return p.DialContext(context.Background(), network, address) +} + +func (p *konnectivityProxy) getTLSConfig() *tls.Config { + p.tlsConfigOnce.Do(func() { + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(p.ca) { + panic("cannot load client CA") + } + p.tlsConfig = &tls.Config{ + RootCAs: certPool, + MinVersion: tls.VersionTLS12, + } + cert, err := tls.X509KeyPair(p.clientCert, p.clientKey) + if err != nil { + panic(fmt.Sprintf("cannot load client certs: %v", err)) + } + p.tlsConfig.ServerName = p.konnectivityHost + p.tlsConfig.Certificates = []tls.Certificate{cert} + }) + return p.tlsConfig +} + +// DialContext dials the specified address using the specified context. It implements the upstream +// proxy.Dialer interface. 
+func (p *konnectivityProxy) DialContext(ctx context.Context, network string, requestAddress string) (net.Conn, error) { + log := p.log.WithName("konnectivityProxy.DialContext") + log.V(4).Info("Dial called", "network", network, "requestAddress", requestAddress) + requestHost, requestPort, err := net.SplitHostPort(requestAddress) + if err != nil { + return nil, fmt.Errorf("invalid address (%s): %w", requestAddress, err) + } + log.V(4).Info("Host and port determined", "requestHost", requestHost, "requestPort", requestPort) + // return a dial direct function which respects any proxy environment settings + if p.connectDirectlyToCloudAPIs && p.IsCloudAPI(requestHost) { + p.log.V(4).Info("Host name is cloud API, dialing through mgmt cluster proxy if present") + return p.dialDirectWithProxy(network, requestAddress) + } + + // return a dial direct function ignoring any proxy environment settings + shouldDNSFallback := p.fallbackToMCDNS.get() + if shouldDNSFallback && p.resolveFromManagementClusterDNS { + log.V(4).Info("Should DNS fallback is set to true and resolve from management cluster DNS is true, dialing direct") + return p.dialDirectWithoutProxy(ctx, network, requestAddress) + } + + // get a TLS config based on x509 certs + tlsConfig := p.getTLSConfig() + + // connect to the konnectivity server address and get a TLS connection + konnectivityServerAddress := net.JoinHostPort(p.konnectivityHost, fmt.Sprintf("%d", p.konnectivityPort)) + log.V(4).Info("Dialing konnectivity server", "address", konnectivityServerAddress) + konnectivityConnection, err := tls.Dial("tcp", konnectivityServerAddress, tlsConfig) + if err != nil { + return nil, fmt.Errorf("dialing proxy %q failed: %v", konnectivityServerAddress, err) + } + + if p.resolveBeforeDial && !p.disableResolver && !isIP(requestHost) { + log.V(4).Info("Host name must be resolved before dialing", "host", requestHost) + _, ip, err := p.Resolve(ctx, requestHost) + if err != nil { + return nil, fmt.Errorf("failed to resolve 
name %s: %w", requestHost, err) + } + p.log.V(4).Info("Host name resolved", "ip", ip.String()) + requestAddress = net.JoinHostPort(ip.String(), requestPort) + } + + // The CONNECT command sent to the Konnectivity server opens a TCP connection + // to the request host via the konnectivity tunnel. + connectString := fmt.Sprintf("CONNECT %s HTTP/1.1\r\nHost: %s\r\n\r\n", requestAddress, requestHost) + log.V(4).Info("Sending connect string to konnectivity server", "connectString", connectString) + _, err = fmt.Fprintf(konnectivityConnection, "%s", connectString) + if err != nil { + log.V(4).Error(err, "Failed to write string to konnectivity server connection") + return nil, err + } + + // read HTTP response and return the connection + br := bufio.NewReader(konnectivityConnection) + p.log.V(4).Info("Reading response from konnectivity server") + res, err := http.ReadResponse(br, nil) + if err != nil { + return nil, fmt.Errorf("reading HTTP response from CONNECT to %s via proxy %s failed: %v", + requestAddress, konnectivityServerAddress, err) + } + if res.StatusCode != 200 { + log.V(4).Info("Status code was not 200", "statusCode", res.StatusCode) + return nil, fmt.Errorf("proxy error from %s while dialing %s: %v", konnectivityServerAddress, requestAddress, res.Status) + } + // It's safe to discard the bufio.Reader here and return the original TCP conn directly because we only use this + // for TLS. In TLS, the client speaks first, so we know there's no unbuffered data, but we can double-check. 
+ if br.Buffered() > 0 { + log.V(4).Info("The response contained buffered data, none expected") + return nil, fmt.Errorf("unexpected %d bytes of buffered data from CONNECT proxy %q", + br.Buffered(), konnectivityServerAddress) + } + log.V(4).Info("Successfully created connection through konnectivity") + return konnectivityConnection, nil +} + +// dialDirectWithoutProxy directly connect to the target, ignoring any local proxy settings from the environment +func (p *konnectivityProxy) dialDirectWithoutProxy(ctx context.Context, network, addr string) (net.Conn, error) { + var d = net.Dialer{ + Timeout: 2 * time.Minute, + } + connection, err := d.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + p.fallbackToMCDNS.set(false) + return connection, nil +} + +// dialDirectWithProxy directly connect to the target, respecting any local proxy settings from the environment +func (p *konnectivityProxy) dialDirectWithProxy(network, addr string) (net.Conn, error) { + p.httpDialerOnce.Do(func() { + if proxyURLStr := os.Getenv("HTTPS_PROXY"); proxyURLStr != "" { + proxyURL, err := url.Parse(proxyURLStr) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to parse HTTPS_PROXY(%s): %v", proxyURLStr, err) + } else { + p.httpDialer = newHTTPDialer(proxyURL) + } + } + if p.httpDialer == nil { + p.httpDialer = proxy.Direct + } + }) + return p.httpDialer.Dial(network, addr) +} + +type syncBool struct { + value bool + mutex sync.RWMutex +} + +func (b *syncBool) get() bool { + b.mutex.RLock() + defer b.mutex.RUnlock() + return b.value +} + +func (f *syncBool) set(valueToSet bool) { + f.mutex.Lock() + defer f.mutex.Unlock() + f.value = valueToSet +} + +// IsCloudAPI is a hardcoded list of domains that should not be routed through Konnectivity but be reached +// through the management cluster. This is needed to support management clusters with a proxy configuration, +// as the components themselves already have proxy env vars pointing to the socks proxy (this binary). 
If we then +// actually end up proxying or not depends on the env for this binary. +// DNS domains. The API list can be found below: +// AWS: https://docs.aws.amazon.com/general/latest/gr/rande.html#regional-endpoints +// AZURE: https://docs.microsoft.com/en-us/rest/api/azure/#how-to-call-azure-rest-apis-with-curl +// IBMCLOUD: https://cloud.ibm.com/apidocs/iam-identity-token-api#endpoints +func (p *konnectivityProxy) IsCloudAPI(host string) bool { + log := p.log.WithName("konnectivityProxy.IsCloudAPI") + log.V(4).Info("Determining whether host is cloud API", "host", host) + if p.excludeCloudHosts.Has(host) { + log.V(4).Info("Host is in the list of exclude hosts, returnin false") + return false + } + if strings.HasSuffix(host, ".amazonaws.com") || + strings.HasSuffix(host, ".microsoftonline.com") || + strings.HasSuffix(host, "azure.com") || + strings.HasSuffix(host, "cloud.ibm.com") { + log.V(4).Info("Host has one of the cloud API suffixes, returning true") + return true + } + return false +} + +func isIP(address string) bool { + return net.ParseIP(address) != nil +} diff --git a/support/konnectivityproxy/dialer_test.go b/support/konnectivityproxy/dialer_test.go new file mode 100644 index 0000000000..e791ccbf9e --- /dev/null +++ b/support/konnectivityproxy/dialer_test.go @@ -0,0 +1,76 @@ +package konnectivityproxy + +import ( + "testing" + + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestValidate(t *testing.T) { + + tests := []struct { + name string + o Options + expectValid bool + }{ + { + name: "valid options", + o: Options{ + CAFile: "test-ca", + ClientCertBytes: []byte("test-cert"), + ClientKeyFile: "test-key-name", + KonnectivityHost: "example.org", + KonnectivityPort: 123, + Client: fake.NewFakeClient(), + }, + expectValid: true, + }, + { + name: "missing CA", + o: Options{ + ClientCertBytes: []byte("test-cert"), + ClientKeyFile: "test-key-name", + KonnectivityHost: "example.org", + KonnectivityPort: 123, + Client: fake.NewFakeClient(), + }, 
+ expectValid: false, + }, + { + name: "missing KonnectivityPort", + o: Options{ + CABytes: []byte("test-ca"), + ClientCertBytes: []byte("test-cert"), + ClientKeyFile: "test-key-name", + KonnectivityHost: "example.org", + Client: fake.NewFakeClient(), + }, + expectValid: false, + }, + { + name: "client cert file and bytes", + o: Options{ + CAFile: "test-ca", + ClientCertFile: "test-cert-file", + ClientCertBytes: []byte("test-cert"), + ClientKeyFile: "test-key-name", + KonnectivityHost: "example.org", + KonnectivityPort: 123, + Client: fake.NewFakeClient(), + }, + expectValid: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.o.Validate() + if test.expectValid && err != nil { + t.Errorf("unexpected error: %v", err) + } + if !test.expectValid && err == nil { + t.Errorf("did not get expected error") + } + }) + } +} diff --git a/support/konnectivityproxy/proxy_dialer.go b/support/konnectivityproxy/proxy_dialer.go new file mode 100644 index 0000000000..1adbdad0bb --- /dev/null +++ b/support/konnectivityproxy/proxy_dialer.go @@ -0,0 +1,87 @@ +package konnectivityproxy + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/proxy" +) + +func newHTTPDialer(proxyURL *url.URL) proxy.Dialer { + return &httpProxyDialer{proxyURL: proxyURL, forwardDial: proxy.Direct.Dial} +} + +// Everything below is a copied from https://github.com/fasthttp/websocket/blob/2f8e79d2aac1e8e5a06518870e872b15608cea90/proxy.go +// as the golang.org/x/net/proxy package only supports socks5 proxies, but does allow registering additional protocols. 
+type httpProxyDialer struct {
+	proxyURL    *url.URL
+	forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+	hostPort, _ := hostPortNoPort(hpd.proxyURL)
+	conn, err := hpd.forwardDial(network, hostPort)
+	if err != nil {
+		return nil, err
+	}
+
+	connectHeader := make(http.Header)
+	if user := hpd.proxyURL.User; user != nil {
+		proxyUser := user.Username()
+		if proxyPassword, passwordSet := user.Password(); passwordSet {
+			credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+			connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+		}
+	}
+
+	connectReq := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Opaque: addr},
+		Host:   addr,
+		Header: connectHeader,
+	}
+
+	if err := connectReq.Write(conn); err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	// Read response. It's OK to use and discard buffered reader here because
+	// the remote server does not speak until spoken to.
+ br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} diff --git a/support/konnectivityproxy/resolver.go b/support/konnectivityproxy/resolver.go new file mode 100644 index 0000000000..def04008c9 --- /dev/null +++ b/support/konnectivityproxy/resolver.go @@ -0,0 +1,165 @@ +package konnectivityproxy + +import ( + "context" + "errors" + "fmt" + "net" + "strings" + "sync" + + "github.com/armon/go-socks5" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// guestClusterResolver uses the Konnectivity dialer to perform a DNS lookup using +// the CoreDNS service of the hosted cluster. It does an initial lookup of the DNS +// service using a hosted cluster client to create an internal resolver that performs +// a TCP lookup on that service. 
+type guestClusterResolver struct { + log logr.Logger + client client.Client + konnectivityDialFunc func(ctx context.Context, network string, addr string) (net.Conn, error) + resolver *net.Resolver + resolverLock sync.Mutex +} + +func (gr *guestClusterResolver) getResolver(ctx context.Context) (*net.Resolver, error) { + gr.resolverLock.Lock() + defer gr.resolverLock.Unlock() + if gr.resolver != nil { + return gr.resolver, nil + } + dnsService := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "openshift-dns", Name: "dns-default"}} + if err := gr.client.Get(ctx, client.ObjectKeyFromObject(dnsService), dnsService); err != nil { + return nil, fmt.Errorf("failed to get dns service from guest cluster: %w", err) + } + dnsIP := dnsService.Spec.ClusterIP + if net.ParseIP(dnsIP) != nil && strings.Contains(dnsIP, ":") && !strings.HasPrefix(dnsIP, "[") { + dnsIP = fmt.Sprintf("[%s]", dnsIP) + } + clusterDNSAddress := dnsIP + ":53" + gr.resolver = &net.Resolver{ + PreferGo: true, + Dial: func(ctx context.Context, network, address string) (net.Conn, error) { + return gr.konnectivityDialFunc(ctx, "tcp", clusterDNSAddress) + }, + } + + return gr.resolver, nil +} + +func (gr *guestClusterResolver) resolve(ctx context.Context, name string) (net.IP, error) { + resolver, err := gr.getResolver(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get resolver: %w", err) + + } + addresses, err := resolver.LookupHost(ctx, name) + if err != nil { + return nil, fmt.Errorf("failed to resolve %q: %w", name, err) + } + if len(addresses) == 0 { + return nil, errors.New("no addresses found") + } + address := net.ParseIP(addresses[0]) + if address == nil { + return nil, fmt.Errorf("failed to parse address %q as IP", addresses[0]) + } + return address, nil +} + +// proxyResolver tries to resolve addresses using the following steps in order: +// 1. Not at all for cloud provider apis (we do not want to tunnel them through Konnectivity) or when disableResolver is true. +// 2. 
If the address is a valid Kubernetes service and that service exists in the guest cluster, its clusterIP is returned. +// 3. If --resolve-from-guest-cluster-dns is set, it uses the guest clusters dns. If that fails, fallback to the management cluster's resolution. +// 4. Lastly, Golang's default resolver is used. +type proxyResolver struct { + client client.Client + disableResolver bool + resolveFromGuestCluster bool + resolveFromManagementCluster bool + mustResolve bool + dnsFallback *syncBool + guestClusterResolver *guestClusterResolver + log logr.Logger + isCloudAPI func(string) bool +} + +func (d proxyResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) { + // Preserve the host so we can recognize it + if d.isCloudAPI(name) || d.disableResolver { + return d.defaultResolve(ctx, name) + } + l := d.log.WithValues("name", name) + _, ip, err := d.ResolveK8sService(ctx, l, name) + if err != nil { + l.Info("failed to resolve address from Kubernetes service", "err", err.Error()) + if !d.resolveFromGuestCluster { + return socks5.DNSResolver{}.Resolve(ctx, name) + } + + l.Info("looking up address from guest cluster cluster-dns") + address, err := d.guestClusterResolver.resolve(ctx, name) + if err != nil { + l.Error(err, "failed to look up address from guest cluster") + + if d.resolveFromManagementCluster { + l.Info("Fallback to management cluster resolution") + d.dnsFallback.set(true) + return d.defaultResolve(ctx, name) + } + + return ctx, nil, fmt.Errorf("failed to look up name %s from guest cluster cluster-dns: %w", name, err) + } + + l.WithValues("address", address.String()).Info("Successfully looked up address from guest cluster") + return ctx, address, nil + } + + return ctx, ip, nil +} + +func (d proxyResolver) defaultResolve(ctx context.Context, name string) (context.Context, net.IP, error) { + // When the resolver is used by the socks5 proxy, a nil response by the resolver + // results in the proxy just using the default system 
resolver. However, when used by + // the http proxy, a nil response will cause an invalid CONNECT string to be created, + // so we must have a valid response. + // d.mustResolve will be set to true if the dialer needs to resolve names before + // dialing (which is the case of the https proxy) + if d.mustResolve { + return socks5.DNSResolver{}.Resolve(ctx, name) + } + return ctx, nil, nil +} + +func (d proxyResolver) ResolveK8sService(ctx context.Context, l logr.Logger, name string) (context.Context, net.IP, error) { + namespaceNamedService := strings.Split(name, ".") + if len(namespaceNamedService) < 2 { + return nil, nil, fmt.Errorf("unable to derive namespacedName from %v", name) + } + namespacedName := types.NamespacedName{ + Namespace: namespaceNamedService[1], + Name: namespaceNamedService[0], + } + + service := &corev1.Service{} + err := d.client.Get(ctx, namespacedName, service) + if err != nil { + return nil, nil, err + } + + // Convert service name to ip address... + ip := net.ParseIP(service.Spec.ClusterIP) + if ip == nil { + return nil, nil, fmt.Errorf("unable to parse IP %v", ip) + } + + l.Info("resolved address from Kubernetes service", "ip", ip.String()) + + return ctx, ip, nil +} diff --git a/support/metrics/sets.go b/support/metrics/sets.go index 2ef193ee4a..6525fb01bf 100644 --- a/support/metrics/sets.go +++ b/support/metrics/sets.go @@ -46,6 +46,7 @@ type MetricsSetConfig struct { OpenShiftControllerManager []*prometheusoperatorv1.RelabelConfig `json:"openshiftControllerManager,omitempty"` OpenShiftRouteControllerManager []*prometheusoperatorv1.RelabelConfig `json:"openshiftRouteControllerManager,omitempty"` CVO []*prometheusoperatorv1.RelabelConfig `json:"cvo,omitempty"` + CCO []*prometheusoperatorv1.RelabelConfig `json:"cco,omitempty"` OLM []*prometheusoperatorv1.RelabelConfig `json:"olm,omitempty"` CatalogOperator []*prometheusoperatorv1.RelabelConfig `json:"catalogOperator,omitempty"` RegistryOperator []*prometheusoperatorv1.RelabelConfig 
`json:"registryOperator,omitempty"` @@ -447,7 +448,7 @@ func RegistryOperatorRelabelConfigs(set MetricsSet) []*prometheusoperatorv1.Rela func SREMetricsSetConfigHash(cm *corev1.ConfigMap) string { value, ok := cm.Data[SREConfigurationConfigMapKey] if ok { - return util.HashStruct(value) + return util.HashSimple(value) } return "" } diff --git a/support/oidc/oidc.go b/support/oidc/oidc.go index 3835bb747a..15c851f693 100644 --- a/support/oidc/oidc.go +++ b/support/oidc/oidc.go @@ -12,7 +12,7 @@ import ( "io" "strings" - jose "gopkg.in/square/go-jose.v2" + jose "gopkg.in/go-jose/go-jose.v2" ) type ODICGeneratorParams struct { diff --git a/support/proxy/no_proxy.go b/support/proxy/no_proxy.go new file mode 100644 index 0000000000..400b09f411 --- /dev/null +++ b/support/proxy/no_proxy.go @@ -0,0 +1,76 @@ +package proxy + +// Based on https://github.com/openshift/cluster-network-operator/blob/4b792c659385948e825d2ba17b8f6d2e5c3acfed/pkg/util/proxyconfig/no_proxy.go + +import ( + "fmt" + "strings" + + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "k8s.io/apimachinery/pkg/util/sets" +) + +const defaultCIDR = "0.0.0.0/0" + +func DefaultNoProxy(hcp *hyperv1.HostedControlPlane) string { + set := sets.New( + "127.0.0.1", + "localhost", + ".svc", + ".cluster.local", + ) + set.Insert(".hypershift.local") + + for _, mc := range hcp.Spec.Networking.MachineNetwork { + if mc.CIDR.String() == defaultCIDR { + continue + } + set.Insert(mc.CIDR.String()) + } + + for _, nss := range hcp.Spec.Networking.ServiceNetwork { + set.Insert(nss.CIDR.String()) + } + + switch hcp.Spec.Platform.Type { + case hyperv1.AWSPlatform, hyperv1.AzurePlatform: + set.Insert("169.254.169.254") + } + + // Construct the node sub domain. + // TODO: Add support for additional cloud providers. 
+ switch hcp.Spec.Platform.Type { + case hyperv1.AWSPlatform: + region := hcp.Spec.Platform.AWS.Region + if region == "us-east-1" { + set.Insert(".ec2.internal") + } else { + set.Insert(fmt.Sprintf(".%s.compute.internal", region)) + } + case hyperv1.AzurePlatform: + // if cloudName := hcp.Spec.Platform.Azure.Cloud; cloudName != "AzurePublicCloud" { + // https://learn.microsoft.com/en-us/azure/virtual-network/what-is-ip-address-168-63-129-16 + set.Insert("168.63.129.16") + // https://bugzilla.redhat.com/show_bug.cgi?id=2104997 + // TODO (cewong): determine where the ARMEndpoint is calculated + // if cloudName == "AzureStackCloud" { + // set.Insert(infra.Status.PlatformStatus.Azure.ARMEndpoint) + // } + // } + } + + for _, clusterNetwork := range hcp.Spec.Networking.ClusterNetwork { + set.Insert(clusterNetwork.CIDR.String()) + } + + if hcp.Spec.Configuration != nil && hcp.Spec.Configuration.Proxy != nil && len(hcp.Spec.Configuration.Proxy.NoProxy) > 0 { + for _, userValue := range strings.Split(hcp.Spec.Configuration.Proxy.NoProxy, ",") { + if userValue != "" { + set.Insert(userValue) + } + } + } + + return strings.Join(sets.List(set), ",") + +} diff --git a/support/releaseinfo/registry_image_content_policies_test.go b/support/releaseinfo/registry_image_content_policies_test.go new file mode 100644 index 0000000000..f895176a8f --- /dev/null +++ b/support/releaseinfo/registry_image_content_policies_test.go @@ -0,0 +1,45 @@ +package releaseinfo + +import ( + "context" + "sync" + "testing" + + . "github.com/onsi/gomega" + imagev1 "github.com/openshift/api/image/v1" +) + +func TestProviderWithOpenShiftImageRegistryOverridesDecorator_Lookup(t *testing.T) { + g := NewWithT(t) + + // Create mock resources. 
+	mirroredReleaseImage := "mirrored-release-image"
+	canonicalReleaseImage := "canonical-release-image"
+	releaseImage := &ReleaseImage{
+		ImageStream:    &imagev1.ImageStream{},
+		StreamMetadata: &CoreOSStreamMetadata{},
+	}
+
+	// Create registry providers delegating to a cached provider so we can mock the cache content for the mirroredReleaseImage.
+	delegate := &RegistryMirrorProviderDecorator{
+		Delegate: &CachedProvider{
+			Inner: &RegistryClientProvider{},
+			Cache: map[string]*ReleaseImage{
+				mirroredReleaseImage: releaseImage,
+			},
+		},
+		RegistryOverrides: map[string]string{},
+	}
+	provider := &ProviderWithOpenShiftImageRegistryOverridesDecorator{
+		Delegate: delegate,
+		OpenShiftImageRegistryOverrides: map[string][]string{
+			canonicalReleaseImage: []string{mirroredReleaseImage},
+		},
+		lock: sync.Mutex{},
+	}
+
+	// Call the Lookup method and validate GetMirroredReleaseImage.
+	_, err := provider.Lookup(context.Background(), canonicalReleaseImage, []byte("test-pull-secret"))
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(provider.GetMirroredReleaseImage()).To(Equal(mirroredReleaseImage))
+}
diff --git a/support/releaseinfo/registry_mirror_provider.go b/support/releaseinfo/registry_mirror_provider.go
index 8dc54bc38b..16774d3352 100644
--- a/support/releaseinfo/registry_mirror_provider.go
+++ b/support/releaseinfo/registry_mirror_provider.go
@@ -31,12 +31,18 @@ func (p *RegistryMirrorProviderDecorator) Lookup(ctx context.Context, image stri
 	if err != nil {
 		return nil, err
 	}
-	for i := range releaseImage.ImageStream.Spec.Tags {
+
+	imageStream := releaseImage.ImageStream.DeepCopy() // deepCopy so the cache is not overridden.
+ for i := range imageStream.Spec.Tags { for registrySource, registryDest := range p.RegistryOverrides { - releaseImage.ImageStream.Spec.Tags[i].From.Name = strings.Replace(releaseImage.ImageStream.Spec.Tags[i].From.Name, registrySource, registryDest, 1) + imageStream.Spec.Tags[i].From.Name = strings.Replace(imageStream.Spec.Tags[i].From.Name, registrySource, registryDest, 1) } } - return releaseImage, nil + + return &ReleaseImage{ + ImageStream: imageStream, + StreamMetadata: releaseImage.StreamMetadata, + }, nil } func (p *RegistryMirrorProviderDecorator) GetRegistryOverrides() map[string]string { diff --git a/support/releaseinfo/static_provider.go b/support/releaseinfo/static_provider.go index 83385146b8..65c8fa3607 100644 --- a/support/releaseinfo/static_provider.go +++ b/support/releaseinfo/static_provider.go @@ -39,7 +39,7 @@ func (p *StaticProviderDecorator) Lookup(ctx context.Context, image string, pull Name: image, }, } - releaseImage.Spec.Tags = append(releaseImage.Spec.Tags, ref) + releaseImage.Spec.Tags = append(releaseImage.Spec.Tags, ref) //TODO(cewong): ensure we're not adding tags that are already in the map! 
} return releaseImage, nil } diff --git a/support/supportedversion/version.go b/support/supportedversion/version.go index 2d3b08b57d..1d28f4e7bd 100644 --- a/support/supportedversion/version.go +++ b/support/supportedversion/version.go @@ -119,7 +119,7 @@ func LookupLatestSupportedRelease(ctx context.Context, hc *hyperv1.HostedCluster minSupportedVersion := GetMinSupportedVersion(hc) prefix := "https://multi.ocp.releases.ci.openshift.org/api/v1/releasestream/4-stable-multi/latest" - filter := fmt.Sprintf("in=>4.%d.%d+<+4.%d.0", + filter := fmt.Sprintf("in=>4.%d.%d+<+4.%d.0-a", minSupportedVersion.Minor, minSupportedVersion.Patch, LatestSupportedVersion.Minor+1) releaseURL := fmt.Sprintf("%s?%s", prefix, filter) diff --git a/support/util/containers.go b/support/util/containers.go index 2754fb6aee..a182ce545c 100644 --- a/support/util/containers.go +++ b/support/util/containers.go @@ -27,6 +27,9 @@ const ( // CPOImageName is the name under which components can find the CPO image in the release image.. CPOImageName = "controlplane-operator" + // CPPKIOImageName is the name under which components can find the CP PKI Operator image in the release image.. + CPPKIOImageName = "controlplane-pki-operator" + // AvailabilityProberImageName is the name under which components can find the availability prober // image in the release image. 
AvailabilityProberImageName = "availability-prober" diff --git a/support/util/expose.go b/support/util/expose.go index c453ef4331..739d8c34a2 100644 --- a/support/util/expose.go +++ b/support/util/expose.go @@ -13,6 +13,11 @@ func ServicePublishingStrategyByTypeForHCP(hcp *hyperv1.HostedControlPlane, svcT return nil } +func IsLBKAS(hcp *hyperv1.HostedControlPlane) bool { + apiServerService := ServicePublishingStrategyByTypeForHCP(hcp, hyperv1.APIServer) + return apiServerService != nil && apiServerService.Type == hyperv1.LoadBalancer +} + func IsRouteKAS(hcp *hyperv1.HostedControlPlane) bool { apiServerService := ServicePublishingStrategyByTypeForHCP(hcp, hyperv1.APIServer) return apiServerService != nil && apiServerService.Type == hyperv1.Route @@ -35,6 +40,11 @@ func ServicePublishingStrategyByTypeByHC(hc *hyperv1.HostedCluster, svcType hype return nil } +func IsLBKASByHC(hc *hyperv1.HostedCluster) bool { + apiServerService := ServicePublishingStrategyByTypeByHC(hc, hyperv1.APIServer) + return apiServerService != nil && apiServerService.Type == hyperv1.LoadBalancer +} + func UseDedicatedDNSForKASByHC(hc *hyperv1.HostedCluster) bool { apiServerService := ServicePublishingStrategyByTypeByHC(hc, hyperv1.APIServer) return apiServerService != nil && apiServerService.Type == hyperv1.Route && diff --git a/support/util/expose_test.go b/support/util/expose_test.go new file mode 100644 index 0000000000..090a26a31b --- /dev/null +++ b/support/util/expose_test.go @@ -0,0 +1,92 @@ +package util + +import ( + "testing" + + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" +) + +func TestIsLBKASByHC(t *testing.T) { + tests := []struct { + description string + hc *hyperv1.HostedCluster + expected bool + }{ + { + description: "hc.spec.services is an empty array", + hc: &hyperv1.HostedCluster{ + Spec: hyperv1.HostedClusterSpec{ + Services: []hyperv1.ServicePublishingStrategyMapping{}, + }, + }, + expected: false, + }, + { + description: "hc.spec.services does not 
contain an entry for KAS", + hc: &hyperv1.HostedCluster{ + Spec: hyperv1.HostedClusterSpec{ + Services: []hyperv1.ServicePublishingStrategyMapping{ + { + Service: hyperv1.OAuthServer, + ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{ + Type: hyperv1.Route, + }, + }, + }, + }, + }, + expected: false, + }, + { + description: "hc.spec.services contains an LB KAS entry", + hc: &hyperv1.HostedCluster{ + Spec: hyperv1.HostedClusterSpec{ + Services: []hyperv1.ServicePublishingStrategyMapping{ + { + Service: hyperv1.OAuthServer, + ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{ + Type: hyperv1.Route, + }, + }, + { + Service: hyperv1.APIServer, + ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{ + Type: hyperv1.LoadBalancer, + }, + }, + }, + }, + }, + expected: true, + }, + { + description: "hc.spec.services contains a Route KAS entry", + hc: &hyperv1.HostedCluster{ + Spec: hyperv1.HostedClusterSpec{ + Services: []hyperv1.ServicePublishingStrategyMapping{ + { + Service: hyperv1.OAuthServer, + ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{ + Type: hyperv1.Route, + }, + }, + { + Service: hyperv1.APIServer, + ServicePublishingStrategy: hyperv1.ServicePublishingStrategy{ + Type: hyperv1.Route, + }, + }, + }, + }, + }, + expected: false, + }, + } + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + if res := IsLBKASByHC(test.hc); res != test.expected { + t.Errorf("IsLBKASByHC() = %v, expected %v", res, test.expected) + } + }) + } +} diff --git a/support/util/imagemetadata.go b/support/util/imagemetadata.go index f956dee960..511b92d9f6 100644 --- a/support/util/imagemetadata.go +++ b/support/util/imagemetadata.go @@ -45,23 +45,34 @@ type RegistryClientImageMetadataProvider struct { func (r *RegistryClientImageMetadataProvider) ImageMetadata(ctx context.Context, imageRef string, pullSecret []byte) (*dockerv1client.DockerImageConfig, error) { log := ctrl.LoggerFrom(ctx) - var repo 
distribution.Repository - var ref *reference.DockerImageReference - - parsedImageRef, err := reference.Parse(imageRef) + var ( + repo distribution.Repository + ref *reference.DockerImageReference + parsedImageRef reference.DockerImageReference + err error + overrideFound bool + ) + + parsedImageRef, err = reference.Parse(imageRef) if err != nil { return nil, fmt.Errorf("failed to parse image reference %q: %w", imageRef, err) } - // If the image reference contains a digest, immediately look it up in the cache - if parsedImageRef.ID != "" { - if imageConfigObject, exists := imageMetadataCache.Get(parsedImageRef.ID); exists { - return imageConfigObject.(*dockerv1client.DockerImageConfig), nil + // There are no ICSPs/IDMSs to process. + // That means the image reference should be pulled from the external registry + if len(r.OpenShiftImageRegistryOverrides) == 0 { + parsedImageRef, err = reference.Parse(imageRef) + if err != nil { + return nil, fmt.Errorf("failed to parse image reference %q: %w", imageRef, err) + } + + // If the image reference contains a digest, immediately look it up in the cache + if parsedImageRef.ID != "" { + if imageConfigObject, exists := imageMetadataCache.Get(parsedImageRef.ID); exists { + return imageConfigObject.(*dockerv1client.DockerImageConfig), nil + } } - } - // There are no ICSPs/IDMSs to process before trying to get the image repo info - if len(r.OpenShiftImageRegistryOverrides) == 0 { ref = &parsedImageRef repo, err = getRepository(ctx, *ref, pullSecret) if err != nil { @@ -72,33 +83,28 @@ func (r *RegistryClientImageMetadataProvider) ImageMetadata(ctx context.Context, // Get the image repo info based the source/mirrors in the ICSPs/IDMSs for source, mirrors := range r.OpenShiftImageRegistryOverrides { for _, mirror := range mirrors { - ref, err = GetRegistryOverrides(ctx, parsedImageRef, source, mirror) + ref, overrideFound, err = GetRegistryOverrides(ctx, parsedImageRef, source, mirror) if err != nil { log.Info(fmt.Sprintf("failed 
to find registry override for image reference %q with source, %s, mirror %s: %s", imageRef, source, mirror, err.Error())) continue } - - // If the image reference contains a digest, immediately look it up in the cache - if ref.ID != "" { - if imageConfigObject, exists := imageMetadataCache.Get(ref.ID); exists { - return imageConfigObject.(*dockerv1client.DockerImageConfig), nil - } - } - - repo, err = getRepository(ctx, *ref, pullSecret) - if err != nil { - log.Info(fmt.Sprintf("failed to create repository client for %s with source, %s, mirror %s: %s", ref.DockerClientDefaults().RegistryURL(), source, mirror, err.Error())) - continue - } break } // We found a successful source/mirror combo so break continuing any further source/mirror combos - if repo != nil { + if overrideFound { break } } - if repo == nil { + // If the image reference contains a digest, immediately look it up in the cache + if ref.ID != "" { + if imageConfigObject, exists := imageMetadataCache.Get(ref.ID); exists { + return imageConfigObject.(*dockerv1client.DockerImageConfig), nil + } + } + + repo, err = getRepository(ctx, *ref, pullSecret) + if err != nil || repo == nil { return nil, fmt.Errorf("failed to create repository client for %s: %w", ref.DockerClientDefaults().RegistryURL(), err) } @@ -155,25 +161,24 @@ func HCControlPlaneReleaseImage(hcluster *hyperv1.HostedCluster) string { return hcluster.Spec.Release.Image } -func GetRegistryOverrides(ctx context.Context, ref reference.DockerImageReference, source string, mirror string) (*reference.DockerImageReference, error) { +func GetRegistryOverrides(ctx context.Context, ref reference.DockerImageReference, source string, mirror string) (*reference.DockerImageReference, bool, error) { log := ctrl.LoggerFrom(ctx) sourceRef, err := reference.Parse(source) if err != nil { - return nil, fmt.Errorf("failed to parse source image reference %q: %w", source, err) + return nil, false, fmt.Errorf("failed to parse source image reference %q: %w", source, 
err) } - if sourceRef.Name == ref.Name { + if sourceRef.Namespace == ref.Namespace && sourceRef.Name == ref.Name { log.Info("registry override coincidence found", "original", fmt.Sprintf("%s/%s/%s", ref.Registry, ref.Namespace, ref.Name), "mirror", mirror) mirrorRef, err := reference.Parse(mirror) if err != nil { - return nil, fmt.Errorf("failed to parse mirror image reference %q: %w", mirrorRef.Name, err) + return nil, false, fmt.Errorf("failed to parse mirror image reference %q: %w", mirrorRef.Name, err) } - return &mirrorRef, nil + return &mirrorRef, true, nil } - log.Info("registry override coincidence not found", "image", ref.Name) - return &ref, nil + return &ref, false, nil } func GetPayloadImage(ctx context.Context, releaseImageProvider releaseinfo.Provider, hc *hyperv1.HostedCluster, component string, pullSecret []byte) (string, error) { diff --git a/support/util/imagemetadata_test.go b/support/util/imagemetadata_test.go index 7e7e59fb6b..096f54be25 100644 --- a/support/util/imagemetadata_test.go +++ b/support/util/imagemetadata_test.go @@ -17,6 +17,7 @@ func TestGetRegistryOverrides(t *testing.T) { mirror string expectedImgRef *reference.DockerImageReference expectAnErr bool + overrideFound bool }{ { name: "if failed to parse source image", @@ -30,6 +31,7 @@ func TestGetRegistryOverrides(t *testing.T) { mirror: "", expectedImgRef: nil, expectAnErr: true, + overrideFound: false, }, { name: "if registry override coincidence not found", @@ -39,15 +41,16 @@ func TestGetRegistryOverrides(t *testing.T) { Namespace: "openshift-release-dev", Tag: "4.14.0-rc.0-multi", }, - source: "quay.io/openshift-release-dev/ocp-release:4.14.0-rc.0-multi", - mirror: "my_registry/openshift-release-dev/ocp-release:4.14.0-rc.0-multi", + source: "quay.io/openshift-release-dev/ocp-release:4.15.0-rc.0-multi", + mirror: "myregistry.io/openshift-release-dev/ocp-release:4.15.0-rc.0-multi", expectedImgRef: &reference.DockerImageReference{ Registry: "quay.io", Name: "ocp", Namespace: 
"openshift-release-dev", Tag: "4.14.0-rc.0-multi", }, - expectAnErr: false, + expectAnErr: false, + overrideFound: false, }, { name: "if registry override coincidence is found", @@ -57,23 +60,25 @@ func TestGetRegistryOverrides(t *testing.T) { Namespace: "openshift-release-dev", Tag: "4.14.0-rc.0-multi", }, - source: "quay.io/openshift-release-dev/ocp-release:4.14.0-rc.0-multi", - mirror: "my_registry/openshift-release-dev/ocp-release:4.14.0-rc.0-multi", + source: "quay.io/openshift-release-dev/ocp-release:4.15.0-rc.0-multi", + mirror: "myregistry.io/openshift-release-dev/ocp-release:4.15.0-rc.0-multi", expectedImgRef: &reference.DockerImageReference{ - Registry: "", - Name: "openshift-release-dev/ocp-release", - Namespace: "my_registry", - Tag: "4.14.0-rc.0-multi", + Registry: "myregistry.io", + Name: "ocp-release", + Namespace: "openshift-release-dev", + Tag: "4.15.0-rc.0-multi", }, - expectAnErr: false, + expectAnErr: false, + overrideFound: true, }, } for _, tc := range testsCases { t.Run(tc.name, func(t *testing.T) { g := NewGomegaWithT(t) - imgRef, err := GetRegistryOverrides(ctx, tc.ref, tc.source, tc.mirror) + imgRef, overrideFound, err := GetRegistryOverrides(ctx, tc.ref, tc.source, tc.mirror) g.Expect(imgRef).To(Equal(tc.expectedImgRef)) g.Expect(err != nil).To(Equal(tc.expectAnErr)) + g.Expect(overrideFound).To(Equal(tc.overrideFound)) }) } } diff --git a/support/util/networking.go b/support/util/networking.go index 78f6c666e7..e2806f10e6 100644 --- a/support/util/networking.go +++ b/support/util/networking.go @@ -78,6 +78,15 @@ func KASPodPortFromHostedCluster(hc *hyperv1.HostedCluster) int32 { return 6443 } +// APIPortForLocalZone returns the port used by processes within a private hosted cluster +// to communicate with the KAS via the api..hypershift.local host. 
+func APIPortForLocalZone(isLBKAS bool) int32 { + if isLBKAS { + return 6443 + } + return 443 +} + func AdvertiseAddress(hcp *hyperv1.HostedControlPlane) *string { if hcp != nil && hcp.Spec.Networking.APIServer != nil { return hcp.Spec.Networking.APIServer.AdvertiseAddress @@ -101,8 +110,14 @@ func AllowedCIDRBlocks(hcp *hyperv1.HostedControlPlane) []hyperv1.CIDRBlock { func GetAdvertiseAddress(hcp *hyperv1.HostedControlPlane, ipv4DefaultAddress, ipv6DefaultAddress string) string { var advertiseAddress string + var ipv4 bool + var err error - ipv4, err := IsIPv4(hcp.Spec.Networking.ServiceNetwork[0].CIDR.String()) + if len(hcp.Spec.Networking.ServiceNetwork) > 0 { + ipv4, err = IsIPv4(hcp.Spec.Networking.ServiceNetwork[0].CIDR.String()) + } else { + ipv4 = true + } if err != nil || ipv4 { if address := AdvertiseAddressWithDefault(hcp, ipv4DefaultAddress); len(address) > 0 { advertiseAddress = address diff --git a/support/util/networking_test.go b/support/util/networking_test.go index 1699f64d83..0c1db2b02a 100644 --- a/support/util/networking_test.go +++ b/support/util/networking_test.go @@ -62,6 +62,17 @@ func TestGetAdvertiseAddress(t *testing.T) { }, want: DefaultAdvertiseIPv6Address, }, + { + name: "given no ServiceNetwork CIDR in the HCP, it should return IPv4 based default address", + hcp: &hyperv1.HostedControlPlane{ + Spec: hyperv1.HostedControlPlaneSpec{ + Networking: hyperv1.ClusterNetworking{ + ServiceNetwork: []hyperv1.ServiceNetworkEntry{}, + }, + }, + }, + want: DefaultAdvertiseIPv4Address, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/support/util/pdb.go b/support/util/pdb.go new file mode 100644 index 0000000000..43cbb1233b --- /dev/null +++ b/support/util/pdb.go @@ -0,0 +1,22 @@ +package util + +import ( + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" +) + +func ReconcilePodDisruptionBudget(pdb 
*policyv1.PodDisruptionBudget, availability hyperv1.AvailabilityPolicy) { + var minAvailable *intstr.IntOrString + var maxUnavailable *intstr.IntOrString + switch availability { + case hyperv1.SingleReplica: + minAvailable = ptr.To(intstr.FromInt(1)) + case hyperv1.HighlyAvailable: + maxUnavailable = ptr.To(intstr.FromInt(1)) + } + pdb.Spec.MinAvailable = minAvailable + pdb.Spec.MaxUnavailable = maxUnavailable +} diff --git a/support/util/route.go b/support/util/route.go index 63c55d5795..491bf259f2 100644 --- a/support/util/route.go +++ b/support/util/route.go @@ -86,9 +86,11 @@ func hash(s string) string { return result } -func ReconcileExternalRoute(route *routev1.Route, hostname string, defaultIngressDomain string, serviceName string) error { - if hostname != "" { +func ReconcileExternalRoute(route *routev1.Route, hostname string, defaultIngressDomain string, serviceName string, labelHCPRoutes bool) error { + if labelHCPRoutes { AddHCPRouteLabel(route) + } + if hostname != "" { route.Spec.Host = hostname } else { if route.Spec.Host == "" { diff --git a/support/util/util.go b/support/util/util.go index fc079a2275..1c4a73e18a 100644 --- a/support/util/util.go +++ b/support/util/util.go @@ -6,26 +6,35 @@ import ( "context" "crypto/tls" "encoding/base64" + "encoding/json" "fmt" "hash/fnv" "io" "net" "net/http" + "os" "sort" "strings" "time" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + + ignitionapi "github.com/coreos/ignition/v2/config/v3_2/types" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" ) const ( // DebugDeploymentsAnnotation contains a comma separated list of deployment names which should always be scaled to 0 // for development. 
- DebugDeploymentsAnnotation = "hypershift.openshift.io/debug-deployments" + DebugDeploymentsAnnotation = "hypershift.openshift.io/debug-deployments" + EnableHostedClustersAnnotationScopingEnv = "ENABLE_HOSTEDCLUSTERS_ANNOTATION_SCOPING" + HostedClustersScopeAnnotationEnv = "HOSTEDCLUSTERS_SCOPE_ANNOTATION" + HostedClustersScopeAnnotation = "hypershift.openshift.io/scope" + HostedClusterAnnotation = "hypershift.openshift.io/cluster" ) // ParseNamespacedName expects a string with the format "namespace/name" @@ -190,14 +199,29 @@ func InsecureHTTPClient() *http.Client { } } -// HashStruct takes a value, typically a string, and returns a 32-bit FNV-1a hashed version of the value as a string -func HashStruct(o interface{}) string { +// HashSimple takes a value, typically a string, and returns a 32-bit FNV-1a hashed version of the value as a string +func HashSimple(o interface{}) string { hash := fnv.New32a() _, _ = hash.Write([]byte(fmt.Sprintf("%v", o))) intHash := hash.Sum32() return fmt.Sprintf("%08x", intHash) } +// HashStruct takes a struct and returns a 32-bit FNV-1a hashed version of the struct as a string +// The struct is first marshalled to JSON before hashing +func HashStruct(data interface{}) (string, error) { + hash := fnv.New32a() + jsonData, err := json.Marshal(data) + if err != nil { + return "", err + } + _, err = hash.Write(jsonData) + if err != nil { + return "", err + } + return fmt.Sprintf("%08x", hash.Sum32()), nil +} + // ConvertRegistryOverridesToCommandLineFlag converts a map of registry sources and their mirrors into a string func ConvertRegistryOverridesToCommandLineFlag(registryOverrides map[string]string) string { var commandLineFlagArray []string @@ -285,3 +309,110 @@ func FirstUsableIP(cidr string) (string, error) { ip[len(ipNet.IP)-1]++ return ip.String(), nil } + +// ParseNodeSelector parses a comma separated string of key=value pairs into a map +func ParseNodeSelector(str string) map[string]string { + if len(str) == 0 { + return nil + 
} + parts := strings.Split(str, ",") + result := make(map[string]string) + for _, part := range parts { + kv := strings.SplitN(part, "=", 2) + if len(kv) != 2 { + continue + } + if len(kv[0]) == 0 || len(kv[1]) == 0 { + continue + } + result[kv[0]] = kv[1] + } + return result +} + +// PredicatesForHostedClusterAnnotationScoping returns predicate filters for all event types that will ignore incoming +// event requests for resources in which the parent hostedcluster does not +// match the "scope" annotation specified in the HOSTEDCLUSTERS_SCOPE_ANNOTATION env var. If not defined or empty, the +// default behavior is to accept all events for hostedclusters that do not have the annotation. +// The ENABLE_HOSTEDCLUSTERS_ANNOTATION_SCOPING env var must also be set to "true" to enable the scoping feature. +func PredicatesForHostedClusterAnnotationScoping(r client.Reader) predicate.Predicate { + hcAnnotationScopingEnabledEnvVal := os.Getenv(EnableHostedClustersAnnotationScopingEnv) + hcScopeAnnotationEnvVal := os.Getenv(HostedClustersScopeAnnotationEnv) + filter := func(obj client.Object) bool { + if hcAnnotationScopingEnabledEnvVal != "true" { + return true // process event; the scoping feature has not been enabled via the ENABLE_HOSTEDCLUSTERS_ANNOTATION_SCOPING env var + } + hostedClusterScopeAnnotation := getHostedClusterScopeAnnotation(obj, r) + if hostedClusterScopeAnnotation == "" && hcScopeAnnotationEnvVal == "" { + return true // process event; both the operator's scope and hostedcluster's scope are empty + } + if hostedClusterScopeAnnotation != hcScopeAnnotationEnvVal { + return false // ignore event; the associated hostedcluster's scope annotation does not match what is defined in HOSTEDCLUSTERS_SCOPE_ANNOTATION + } + return true + } + return predicate.NewPredicateFuncs(filter) +} + +// getHostedClusterScopeAnnotation will extract the "scope" annotation from the hostedcluster resource that owns the specified object. 
+// Depending on the object type being passed in, slightly different paths will be used to ultimately retrieve the hostedcluster resource containing the annotation. +// If an annotation is not found, an empty string is returned. +func getHostedClusterScopeAnnotation(obj client.Object, r client.Reader) string { + hostedClusterName := "" + nodePoolName := "" + switch obj.(type) { + case *hyperv1.HostedCluster: + hc, ok := obj.(*hyperv1.HostedCluster) + if !ok { + return "" + } + if hc.GetAnnotations() != nil { + return hc.GetAnnotations()[HostedClustersScopeAnnotation] + } + case *hyperv1.NodePool: + np, ok := obj.(*hyperv1.NodePool) + if !ok { + return "" + } + hostedClusterName = fmt.Sprintf("%s/%s", np.Namespace, np.Spec.ClusterName) + default: + if obj.GetAnnotations() != nil { + nodePoolName = obj.GetAnnotations()["hypershift.openshift.io/nodePool"] + hostedClusterName = obj.GetAnnotations()[HostedClusterAnnotation] + } + if nodePoolName != "" { + namespacedName := ParseNamespacedName(nodePoolName) + np := &hyperv1.NodePool{} + err := r.Get(context.Background(), namespacedName, np) + if err != nil { + return "" + } + hostedClusterName = fmt.Sprintf("%s/%s", np.Namespace, np.Spec.ClusterName) + } + } + if hostedClusterName == "" { + return "" + } + namespacedName := ParseNamespacedName(hostedClusterName) + hcluster := &hyperv1.HostedCluster{} + err := r.Get(context.Background(), namespacedName, hcluster) + if err != nil { + return "" + } + if hcluster.GetAnnotations() != nil { + return hcluster.GetAnnotations()[HostedClustersScopeAnnotation] + } + return "" +} + +// SanitizeIgnitionPayload make sure the IgnitionPayload is valid +// and does not contain inconsistencies. 
+func SanitizeIgnitionPayload(payload []byte) error { + var jsonPayload ignitionapi.Config + + if err := json.Unmarshal(payload, &jsonPayload); err != nil { + return fmt.Errorf("error unmarshalling Ignition payload: %v", err) + } + + return nil +} diff --git a/support/util/util_test.go b/support/util/util_test.go index 8670f9f80f..cc0aba4bdd 100644 --- a/support/util/util_test.go +++ b/support/util/util_test.go @@ -345,3 +345,111 @@ func TestFirstUsableIP(t *testing.T) { }) } } + +func TestParseNodeSelector(t *testing.T) { + tests := []struct { + name string + str string + want map[string]string + }{ + { + name: "Given a valid node selector string, it should return a map of key value pairs", + str: "key1=value1,key2=value2,key3=value3", + want: map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + }, + }, + { + name: "Given a valid node selector string with empty values, it should return a map of key value pairs", + str: "key1=,key2=value2,key3=", + want: map[string]string{ + "key2": "value2", + }, + }, + { + name: "Given a valid node selector string with empty keys, it should return a map of key value pairs", + str: "=value1,key2=value2,=value3", + want: map[string]string{ + "key2": "value2", + }, + }, + { + name: "Given a valid node selector string with empty string, it should return an empty map", + str: "", + want: nil, + }, + { + name: "Given a valid node selector string with invalid key value pairs, it should return a map of key value pairs", + str: "key1=value1,key2,key3=value3", + want: map[string]string{ + "key1": "value1", + "key3": "value3", + }, + }, + { + name: "Given a valid node selector string with values that include =, it should return a map of key value pairs", + str: "key1=value1=one,key2,key3=value3=three", + want: map[string]string{ + "key1": "value1=one", + "key3": "value3=three", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + got := ParseNodeSelector(tt.str) + 
g.Expect(got).To(Equal(tt.want)) + }) + } +} + +func TestSanitizeIgnitionPayload(t *testing.T) { + tests := []struct { + name string + payload []byte + wantErr bool + }{ + { + name: "Simple valid Ignition payload", + payload: []byte(`{"ignition": {"version": "3.0.0"}}`), + wantErr: false, + }, + { + name: "More complex valid Ignition payload", + payload: []byte(`{"ignition":{"version":"3.0.0"},"storage":{"files":[{"path":"/etc/someconfig","mode":420,"contents":{"source":"data:,example%20file%0A"}}]}}`), + wantErr: false, + }, + { + name: "Simple invalid Ignition payload (missing closing brace)", + payload: []byte(`{"ignition": {"version": "3.0.0"`), + wantErr: true, + }, + { + name: "Empty payload", + payload: []byte(``), + wantErr: true, + }, + { + name: "Nil payload", + payload: nil, + wantErr: true, + }, + } + + t.Parallel() + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + err := SanitizeIgnitionPayload(tt.payload) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + }) + } +} diff --git a/support/util/volumes.go b/support/util/volumes.go index 26429a084e..9928d84baf 100644 --- a/support/util/volumes.go +++ b/support/util/volumes.go @@ -10,6 +10,11 @@ func BuildVolume(volume *corev1.Volume, buildFn func(*corev1.Volume)) corev1.Vol return *volume } +func BuildProjectedVolume(volume *corev1.Volume, volumeProjection []corev1.VolumeProjection, buildFn func(*corev1.Volume, []corev1.VolumeProjection)) corev1.Volume { + buildFn(volume, volumeProjection) + return *volume +} + func DeploymentAddTrustBundleVolume(trustBundleConfigMap *corev1.LocalObjectReference, deployment *appsv1.Deployment) { deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(deployment.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{ Name: "trusted-ca", diff --git a/test/e2e/create_cluster_test.go b/test/e2e/create_cluster_test.go index e2d7789321..402fb75742 100644 --- 
a/test/e2e/create_cluster_test.go +++ b/test/e2e/create_cluster_test.go @@ -8,21 +8,13 @@ import ( "fmt" "strings" "testing" - "time" . "github.com/onsi/gomega" hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" "github.com/openshift/hypershift/cmd/cluster/core" - pkimanifests "github.com/openshift/hypershift/control-plane-pki-operator/manifests" - "github.com/openshift/hypershift/hypershift-operator/controllers/manifests" e2eutil "github.com/openshift/hypershift/test/e2e/util" - authenticationv1 "k8s.io/api/authentication/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" + "github.com/openshift/hypershift/test/integration" + integrationframework "github.com/openshift/hypershift/test/integration/framework" "k8s.io/client-go/tools/clientcmd" crclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -46,50 +38,32 @@ func TestCreateCluster(t *testing.T) { } e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) { - t.Run("break-glass-credentials", func(t *testing.T) { - // Sanity check the cluster by waiting for the nodes to report ready - t.Logf("Waiting for guest client to become available") - _ = e2eutil.WaitForGuestClient(t, ctx, mgtClient, hostedCluster) - - hostedControlPlaneNamespace := manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name) - - // Grab the break-glass client certificate - clientCertificate := pkimanifests.CustomerSystemAdminClientCertSecret(hostedControlPlaneNamespace) - if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 3*time.Minute, true, func(ctx context.Context) (done bool, err error) { - getErr := mgtClient.Get(ctx, crclient.ObjectKeyFromObject(clientCertificate), clientCertificate) - if errors.IsNotFound(getErr) { - return false, nil - } - 
return getErr == nil, err - }); err != nil { - t.Fatalf("client cert didn't become available: %v", err) - } - - guestKubeConfigSecretData, err := e2eutil.WaitForGuestKubeConfig(t, ctx, mgtClient, hostedCluster) - g.Expect(err).NotTo(HaveOccurred(), "couldn't get kubeconfig") - - guestConfig, err := clientcmd.RESTConfigFromKubeConfig(guestKubeConfigSecretData) - g.Expect(err).NotTo(HaveOccurred(), "couldn't load guest kubeconfig") - - // amend the existing kubeconfig to use our client certificate - certConfig := rest.AnonymousClientConfig(guestConfig) - certConfig.TLSClientConfig.CertData = clientCertificate.Data["tls.crt"] - certConfig.TLSClientConfig.KeyData = clientCertificate.Data["tls.key"] - - client, err := kubernetes.NewForConfig(certConfig) - if err != nil { - t.Fatalf("could not create client: %v", err) - } - - response, err := client.AuthenticationV1().SelfSubjectReviews().Create(context.Background(), &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{}) - if err != nil { - t.Fatalf("could not send SSAR: %v", err) - } - - if !sets.New[string](response.Status.UserInfo.Groups...).Has("system:masters") || !strings.HasPrefix(response.Status.UserInfo.Username, "customer-break-glass-") { - t.Fatalf("did not get correct SSAR response: %#v", response) - } - }) + // Sanity check the cluster by waiting for the nodes to report ready + t.Logf("Waiting for guest client to become available") + _ = e2eutil.WaitForGuestClient(t, ctx, mgtClient, hostedCluster) + + t.Logf("fetching mgmt kubeconfig") + mgmtCfg, err := e2eutil.GetConfig() + g.Expect(err).NotTo(HaveOccurred(), "couldn't get mgmt kubeconfig") + mgmtCfg.QPS = -1 + mgmtCfg.Burst = -1 + + mgmtClients, err := integrationframework.NewClients(mgmtCfg) + g.Expect(err).NotTo(HaveOccurred(), "couldn't create mgmt clients") + + t.Logf("fetching guest kubeconfig") + guestKubeConfigSecretData, err := e2eutil.WaitForGuestKubeConfig(t, ctx, mgtClient, hostedCluster) + g.Expect(err).NotTo(HaveOccurred(), "couldn't 
get guest kubeconfig") + + guestConfig, err := clientcmd.RESTConfigFromKubeConfig(guestKubeConfigSecretData) + g.Expect(err).NotTo(HaveOccurred(), "couldn't load guest kubeconfig") + guestConfig.QPS = -1 + guestConfig.Burst = -1 + + guestClients, err := integrationframework.NewClients(guestConfig) + g.Expect(err).NotTo(HaveOccurred(), "couldn't create guest clients") + + integration.RunTestControlPlanePKIOperatorBreakGlassCredentials(t, testContext, hostedCluster, mgmtClients, guestClients) }). Execute(&clusterOpts, globalOpts.Platform, globalOpts.ArtifactDir, globalOpts.ServiceAccountSigningKey) } @@ -125,7 +99,6 @@ func TestCreateClusterRequestServingIsolation(t *testing.T) { e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) { guestClient := e2eutil.WaitForGuestClient(t, testContext, mgtClient, hostedCluster) - e2eutil.EnsurePSANotPrivileged(t, ctx, guestClient) e2eutil.EnsureAllReqServingPodsLandOnReqServingNodes(t, ctx, guestClient) e2eutil.EnsureOnlyRequestServingPodsOnRequestServingNodes(t, ctx, guestClient) e2eutil.EnsureNoHCPPodsLandOnDefaultNode(t, ctx, guestClient, hostedCluster) @@ -198,10 +171,21 @@ func TestCreateClusterProxy(t *testing.T) { Execute(&clusterOpts, globalOpts.Platform, globalOpts.ArtifactDir, globalOpts.ServiceAccountSigningKey) } -// TestCreateClusterPrivate implements a smoke test that creates a private cluster. +func TestCreateClusterPrivate(t *testing.T) { + testCreateClusterPrivate(t, false) +} + +func TestCreateClusterPrivateWithRouteKAS(t *testing.T) { + testCreateClusterPrivate(t, true) +} + +// testCreateClusterPrivate implements a smoke test that creates a private cluster. // Validations requiring guest cluster client are dropped here since the kas is not accessible when private. // In the future we might want to leverage https://issues.redhat.com/browse/HOSTEDCP-697 to access guest cluster. 
-func TestCreateClusterPrivate(t *testing.T) { +func testCreateClusterPrivate(t *testing.T, enableExternalDNS bool) { + if globalOpts.Platform != hyperv1.AWSPlatform { + t.Skip("test only supported on platform AWS") + } t.Parallel() ctx, cancel := context.WithCancel(testContext) @@ -210,24 +194,45 @@ func TestCreateClusterPrivate(t *testing.T) { clusterOpts := globalOpts.DefaultClusterOptions(t) clusterOpts.ControlPlaneAvailabilityPolicy = string(hyperv1.SingleReplica) clusterOpts.AWSPlatform.EndpointAccess = string(hyperv1.Private) + expectGuestKubeconfHostChange := false + if !enableExternalDNS { + clusterOpts.ExternalDNSDomain = "" + expectGuestKubeconfHostChange = true + } e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) { // Private -> publicAndPrivate - t.Run("SwitchFromPrivateToPublic", testSwitchFromPrivateToPublic(ctx, mgtClient, hostedCluster, &clusterOpts)) + t.Run("SwitchFromPrivateToPublic", testSwitchFromPrivateToPublic(ctx, mgtClient, hostedCluster, &clusterOpts, expectGuestKubeconfHostChange)) // publicAndPrivate -> Private t.Run("SwitchFromPublicToPrivate", testSwitchFromPublicToPrivate(ctx, mgtClient, hostedCluster, &clusterOpts)) }).Execute(&clusterOpts, globalOpts.Platform, globalOpts.ArtifactDir, globalOpts.ServiceAccountSigningKey) } -func testSwitchFromPrivateToPublic(ctx context.Context, client crclient.Client, hostedCluster *hyperv1.HostedCluster, clusterOpts *core.CreateOptions) func(t *testing.T) { +func testSwitchFromPrivateToPublic(ctx context.Context, client crclient.Client, hostedCluster *hyperv1.HostedCluster, clusterOpts *core.CreateOptions, expectGuestKubeconfHostChange bool) func(t *testing.T) { return func(t *testing.T) { g := NewWithT(t) - err := e2eutil.UpdateObject(t, ctx, client, hostedCluster, func(obj *hyperv1.HostedCluster) { + var ( + host string + err error + ) + if expectGuestKubeconfHostChange { + // Get guest kubeconfig host before 
switching endpoint access + host, err = e2eutil.GetGuestKubeconfigHost(t, ctx, client, hostedCluster) + g.Expect(err).ToNot(HaveOccurred(), "failed to get guest kubeconfig host") + t.Logf("Found guest kubeconfig host before switching endpoint access: %s", host) + } + + // Switch to PublicAndPrivate endpoint access + err = e2eutil.UpdateObject(t, ctx, client, hostedCluster, func(obj *hyperv1.HostedCluster) { obj.Spec.Platform.AWS.EndpointAccess = hyperv1.PublicAndPrivate }) g.Expect(err).ToNot(HaveOccurred(), "failed to update hostedcluster EndpointAccess") + if expectGuestKubeconfHostChange { + e2eutil.WaitForGuestKubeconfigHostUpdate(t, ctx, client, hostedCluster, host) + } + e2eutil.ValidatePublicCluster(t, ctx, client, hostedCluster, clusterOpts) } } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index bc5ef4073b..60a88e34da 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -93,7 +93,7 @@ func TestMain(m *testing.M) { flag.StringVar(&globalOpts.configurableClusterOptions.AzureLocation, "e2e.azure-location", "eastus", "The location to use for Azure") flag.StringVar(&globalOpts.configurableClusterOptions.SSHKeyFile, "e2e.ssh-key-file", "", "Path to a ssh public key") flag.StringVar(&globalOpts.platformRaw, "e2e.platform", string(hyperv1.AWSPlatform), "The platform to use for the tests") - flag.StringVar(&globalOpts.configurableClusterOptions.NetworkType, "network-type", "", "The network type to use. If unset, will default based on the OCP version.") + flag.StringVar(&globalOpts.configurableClusterOptions.NetworkType, "network-type", string(hyperv1.OVNKubernetes), "The network type to use. If unset, will default based on the OCP version.") flag.StringVar(&globalOpts.configurableClusterOptions.PowerVSResourceGroup, "e2e.powervs-resource-group", "", "IBM Cloud Resource group") flag.StringVar(&globalOpts.configurableClusterOptions.PowerVSRegion, "e2e.powervs-region", "us-south", "IBM Cloud region. 
Default is us-south") flag.StringVar(&globalOpts.configurableClusterOptions.PowerVSZone, "e2e.powervs-zone", "us-south", "IBM Cloud zone. Default is us-sout") @@ -105,12 +105,12 @@ func TestMain(m *testing.M) { flag.StringVar(&globalOpts.configurableClusterOptions.PowerVSCloudInstanceID, "e2e-powervs-cloud-instance-id", "", "IBM Cloud PowerVS Service Instance ID. Use this flag to reuse an existing PowerVS Service Instance resource for cluster's infra") flag.StringVar(&globalOpts.configurableClusterOptions.PowerVSCloudConnection, "e2e-powervs-cloud-connection", "", "Cloud Connection in given zone. Use this flag to reuse an existing Cloud Connection resource for cluster's infra") flag.StringVar(&globalOpts.configurableClusterOptions.PowerVSVPC, "e2e-powervs-vpc", "", "IBM Cloud VPC Name. Use this flag to reuse an existing VPC resource for cluster's infra") - flag.BoolVar(&globalOpts.SkipAPIBudgetVerification, "e2e.skip-api-budget", false, "Bool to avoid send metrics to E2E Server on local test execution.") flag.StringVar(&globalOpts.configurableClusterOptions.EtcdStorageClass, "e2e.etcd-storage-class", "", "The persistent volume storage class for etcd data volumes") flag.BoolVar(&globalOpts.RequestServingIsolation, "e2e.test-request-serving-isolation", false, "If set, TestCreate creates a cluster with request serving isolation topology") flag.StringVar(&globalOpts.ManagementParentKubeconfig, "e2e.management-parent-kubeconfig", "", "Kubeconfig of the management cluster's parent cluster (required to test request serving isolation)") flag.StringVar(&globalOpts.ManagementClusterNamespace, "e2e.management-cluster-namespace", "", "Namespace of the management cluster's HostedCluster (required to test request serving isolation)") flag.StringVar(&globalOpts.ManagementClusterName, "e2e.management-cluster-name", "", "Name of the management cluster's HostedCluster (required to test request serving isolation)") + flag.Var(&globalOpts.configurableClusterOptions.Annotations, 
"e2e.annotations", "Annotations to apply to the HostedCluster (key=value). Can be specified multiple times") flag.Parse() @@ -364,10 +364,6 @@ type options struct { IssuerURL string ServiceAccountSigningKey []byte - // SkipAPIBudgetVerification implies that you are executing the e2e tests - // from local to verify that them works fine before push - SkipAPIBudgetVerification bool - // If set, the CreateCluster test will create a cluster with request serving // isolation topology. RequestServingIsolation bool @@ -415,6 +411,7 @@ type configurableClusterOptions struct { PowerVSCloudConnection string PowerVSVPC string EtcdStorageClass string + Annotations stringMapVar } var nextAWSZoneIndex = 0 @@ -475,8 +472,7 @@ func (o *options) DefaultClusterOptions(t *testing.T) core.CreateOptions { fmt.Sprintf("%s=true", hyperv1.CleanupCloudResourcesAnnotation), fmt.Sprintf("%s=true", hyperv1.SkipReleaseImageValidation), }, - SkipAPIBudgetVerification: o.SkipAPIBudgetVerification, - EtcdStorageClass: o.configurableClusterOptions.EtcdStorageClass, + EtcdStorageClass: o.configurableClusterOptions.EtcdStorageClass, } // Arch is only currently valid for aws platform @@ -509,6 +505,12 @@ func (o *options) DefaultClusterOptions(t *testing.T) core.CreateOptions { createOption.SSHKeyFile = o.configurableClusterOptions.SSHKeyFile } + if o.configurableClusterOptions.Annotations != nil { + for k, v := range o.configurableClusterOptions.Annotations { + createOption.Annotations = append(createOption.Annotations, fmt.Sprintf("%s=%s", k, v)) + } + } + return createOption } @@ -594,3 +596,24 @@ type stringSliceVar []string func (s *stringSliceVar) String() string { return strings.Join(*s, ",") } func (s *stringSliceVar) Set(v string) error { *s = append(*s, strings.Split(v, ",")...); return nil } + +type stringMapVar map[string]string + +func (s *stringMapVar) String() string { + if *s == nil { + return "" + } + return fmt.Sprintf("%v", *s) +} + +func (s *stringMapVar) Set(value string) error { 
+ split := strings.Split(value, "=") + if len(split) != 2 { + return fmt.Errorf("invalid argument: %s", value) + } + if *s == nil { + *s = map[string]string{} + } + map[string]string(*s)[split[0]] = split[1] + return nil +} diff --git a/test/e2e/nodepool_kv_multinet_test.go b/test/e2e/nodepool_kv_multinet_test.go new file mode 100644 index 0000000000..9b9d67e119 --- /dev/null +++ b/test/e2e/nodepool_kv_multinet_test.go @@ -0,0 +1,157 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "context" + "fmt" + "testing" + "time" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/utils/ptr" + kubevirtv1 "kubevirt.io/api/core/v1" + "sigs.k8s.io/cluster-api/util" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/hypershift-operator/controllers/manifests" + kvinfra "github.com/openshift/hypershift/kubevirtexternalinfra" +) + +type KubeVirtMultinetTest struct { + ctx context.Context + client crclient.Client + hostedCluster *hyperv1.HostedCluster +} + +func NewKubeVirtMultinetTest(ctx context.Context, cl crclient.Client, hc *hyperv1.HostedCluster) *KubeVirtMultinetTest { + return &KubeVirtMultinetTest{ + ctx: ctx, + client: cl, + hostedCluster: hc, + } +} + +func (k KubeVirtMultinetTest) Setup(t *testing.T) { + if globalOpts.Platform != hyperv1.KubevirtPlatform { + t.Skip("test only supported on platform KubeVirt") + } + + t.Log("Starting test KubeVirtMultinetTest") +} + +func (k KubeVirtMultinetTest) Run(t *testing.T, nodePool hyperv1.NodePool, _ []corev1.Node) { + g := NewWithT(t) + + np := &hyperv1.NodePool{} + g.Eventually(func(gg Gomega) { + gg.Expect(k.client.Get(k.ctx, util.ObjectKey(&nodePool), np)).Should(Succeed()) + gg.Expect(np.Spec.Platform).ToNot(BeNil()) + 
gg.Expect(np.Spec.Platform.Type).To(Equal(hyperv1.KubevirtPlatform)) + gg.Expect(np.Spec.Platform.Kubevirt).ToNot(BeNil()) + gg.Expect(np.Spec.Platform.Kubevirt.AdditionalNetworks).To(Equal([]hyperv1.KubevirtNetwork{{ + Name: k.nadNamespace() + "/net1", + }})) + }).Within(5 * time.Minute).WithPolling(time.Second).Should(Succeed()) + + localInfraNS := manifests.HostedControlPlaneNamespace(k.hostedCluster.Namespace, k.hostedCluster.Name) + var guestNamespace string + if np.Status.Platform != nil && + np.Status.Platform.KubeVirt != nil && + np.Status.Platform.KubeVirt.Credentials != nil && + len(np.Status.Platform.KubeVirt.Credentials.InfraNamespace) > 0 { + + guestNamespace = np.Status.Platform.KubeVirt.Credentials.InfraNamespace + g.Expect(np.Status.Platform.KubeVirt.Credentials.InfraKubeConfigSecret).ToNot(BeNil()) + g.Expect(np.Status.Platform.KubeVirt.Credentials.InfraKubeConfigSecret.Key).Should(Equal("kubeconfig")) + } else { + guestNamespace = localInfraNS + } + + cm := kvinfra.NewKubevirtInfraClientMap() + var creds *hyperv1.KubevirtPlatformCredentials + if np.Status.Platform != nil && np.Status.Platform.KubeVirt != nil { + creds = np.Status.Platform.KubeVirt.Credentials + } + infraClient, err := cm.DiscoverKubevirtClusterClient(k.ctx, k.client, k.hostedCluster.Spec.InfraID, creds, localInfraNS, np.GetNamespace()) + g.Expect(err).ShouldNot(HaveOccurred()) + + vmis := &kubevirtv1.VirtualMachineInstanceList{} + labelSelector := labels.SelectorFromValidatedSet(labels.Set{hyperv1.NodePoolNameLabel: np.Name}) + g.Eventually(func(gg Gomega) { + gg.Expect( + infraClient.GetInfraClient().List(k.ctx, vmis, &crclient.ListOptions{Namespace: guestNamespace, LabelSelector: labelSelector}), + ).To(Succeed()) + + gg.Expect(vmis.Items).To(HaveLen(1)) + vmi := vmis.Items[0] + // Use gomega HaveField so we can skip "Mac" matching + matchingInterface := &kubevirtv1.Interface{} + gg.Expect(vmi.Spec.Domain.Devices.Interfaces).To(ContainElement( + HaveField("Name", 
"iface1_"+k.nadNamespace()+"-net1"), matchingInterface), + ) + gg.Expect(matchingInterface.InterfaceBindingMethod.Bridge).ToNot(BeNil()) + gg.Expect(vmi.Spec.Networks).To(ContainElement(kubevirtv1.Network{ + Name: "iface1_" + k.nadNamespace() + "-net1", + NetworkSource: kubevirtv1.NetworkSource{ + Multus: &kubevirtv1.MultusNetwork{ + NetworkName: k.nadNamespace() + "/net1", + }, + }, + })) + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) +} + +func (k KubeVirtMultinetTest) BuildNodePoolManifest(defaultNodepool hyperv1.NodePool) (*hyperv1.NodePool, error) { + + nadYAML := fmt.Sprintf(` +apiVersion: "k8s.cni.cncf.io/v1" +kind: NetworkAttachmentDefinition +metadata: + namespace: %[1]s + name: %[2]s +spec: + config: |2 + { + "cniVersion": "0.3.1", + "name": "l2-network", + "type": "ovn-k8s-cni-overlay", + "topology":"layer2", + "netAttachDefName": "%[1]s/%[2]s" + } +`, k.nadNamespace(), "net1") + nad := &unstructured.Unstructured{Object: map[string]interface{}{}} + if err := yaml.Unmarshal([]byte(nadYAML), &nad); err != nil { + return nil, fmt.Errorf("failed unmarshaling net-attach-def: %w", err) + } + if err := k.client.Create(context.Background(), nad); err != nil { + return nil, fmt.Errorf("failed creating net-attach-def: %w", err) + } + + nodePool := &hyperv1.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: k.hostedCluster.Name + "-" + "test-kv-multinet", + Namespace: k.hostedCluster.Namespace, + }, + } + defaultNodepool.Spec.DeepCopyInto(&nodePool.Spec) + + if nodePool.Spec.Platform.Kubevirt != nil { + nodePool.Spec.Platform.Kubevirt.AdditionalNetworks = []hyperv1.KubevirtNetwork{{ + Name: k.nadNamespace() + "/net1", + }} + } + nodePool.Spec.Replicas = ptr.To(int32(1)) + return nodePool, nil +} + +func (k KubeVirtMultinetTest) nadNamespace() string { + return fmt.Sprintf("%s-%s", k.hostedCluster.Namespace, k.hostedCluster.Name) +} diff --git a/test/e2e/nodepool_kv_nodeselector_test.go b/test/e2e/nodepool_kv_nodeselector_test.go 
new file mode 100644 index 0000000000..5a36f1872e --- /dev/null +++ b/test/e2e/nodepool_kv_nodeselector_test.go @@ -0,0 +1,123 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "context" + "testing" + "time" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/util/retry" + "k8s.io/utils/ptr" + kubevirtv1 "kubevirt.io/api/core/v1" + crclient "sigs.k8s.io/controller-runtime/pkg/client" + + hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/hypershift-operator/controllers/manifests" + kvinfra "github.com/openshift/hypershift/kubevirtexternalinfra" +) + +type KubeVirtNodeSelectorTest struct { + ctx context.Context + client crclient.Client + hostedCluster *hyperv1.HostedCluster + nodeSelector map[string]string +} + +func NewKubeKubeVirtNodeSelectorTest(ctx context.Context, cl crclient.Client, hc *hyperv1.HostedCluster) *KubeVirtNodeSelectorTest { + return &KubeVirtNodeSelectorTest{ + ctx: ctx, + client: cl, + hostedCluster: hc, + nodeSelector: map[string]string{ + "nodepool-nodeselector-testlabel": utilrand.String(10), + }, + } +} + +func (k KubeVirtNodeSelectorTest) Setup(t *testing.T) { + if globalOpts.Platform != hyperv1.KubevirtPlatform { + t.Skip("test only supported on platform KubeVirt") + } + + t.Log("Starting test KubeVirtNodeSelectorTest") + + g := NewWithT(t) + infraClient, err := k.GetInfraClient() + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(retry.RetryOnConflict(retry.DefaultRetry, func() error { + labelSelector := labels.SelectorFromValidatedSet(labels.Set{"cpu-vendor.node.kubevirt.io/Intel": "true"}) + nodes := &corev1.NodeList{} + err = infraClient.GetInfraClient().List(k.ctx, nodes, &crclient.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return err + } + if len(nodes.Items) == 0 { + labelSelector := 
labels.SelectorFromValidatedSet(labels.Set{"cpu-vendor.node.kubevirt.io/AMD": "true"}) + err = infraClient.GetInfraClient().List(k.ctx, nodes, &crclient.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return err + } + } + g.Expect(len(nodes.Items)).ToNot(Equal(0)) + node := &nodes.Items[0] + nodeLabels := node.Labels + for key, value := range k.nodeSelector { + nodeLabels[key] = value + } + node.SetLabels(nodeLabels) + err := infraClient.GetInfraClient().Update(k.ctx, node, &crclient.UpdateOptions{}) + return err + })).To(Succeed()) +} + +func (k KubeVirtNodeSelectorTest) Run(t *testing.T, nodePool hyperv1.NodePool, _ []corev1.Node) { + g := NewWithT(t) + + localInfraNS := manifests.HostedControlPlaneNamespace(k.hostedCluster.Namespace, k.hostedCluster.Name) + infraClient, err := k.GetInfraClient() + g.Expect(err).ShouldNot(HaveOccurred()) + + vmis := &kubevirtv1.VirtualMachineInstanceList{} + labelSelector := labels.SelectorFromValidatedSet(labels.Set{hyperv1.NodePoolNameLabel: nodePool.Name}) + g.Eventually(func(gg Gomega) { + gg.Expect( + infraClient.GetInfraClient().List(k.ctx, vmis, &crclient.ListOptions{Namespace: localInfraNS, LabelSelector: labelSelector}), + ).To(Succeed()) + + gg.Expect(vmis.Items).To(HaveLen(1)) + vmi := vmis.Items[0] + + gg.Expect(vmi.Spec.NodeSelector).ToNot(BeNil()) + gg.Expect(vmi.Spec.NodeSelector).To(Equal(k.nodeSelector)) + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) +} + +func (k KubeVirtNodeSelectorTest) BuildNodePoolManifest(defaultNodepool hyperv1.NodePool) (*hyperv1.NodePool, error) { + nodePool := &hyperv1.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: k.hostedCluster.Name + "-" + "test-kv-nodeselector", + Namespace: k.hostedCluster.Namespace, + }, + } + defaultNodepool.Spec.DeepCopyInto(&nodePool.Spec) + + nodePool.Spec.Replicas = ptr.To(int32(1)) + nodePool.Spec.Platform.Kubevirt.NodeSelector = k.nodeSelector + + return nodePool, nil +} + +func (k 
KubeVirtNodeSelectorTest) GetInfraClient() (kvinfra.KubevirtInfraClient, error) { + localInfraNS := manifests.HostedControlPlaneNamespace(k.hostedCluster.Namespace, k.hostedCluster.Name) + cm := kvinfra.NewKubevirtInfraClientMap() + var creds *hyperv1.KubevirtPlatformCredentials + return cm.DiscoverKubevirtClusterClient(k.ctx, k.client, k.hostedCluster.Spec.InfraID, creds, localInfraNS, k.hostedCluster.Namespace) +} diff --git a/test/e2e/nodepool_test.go b/test/e2e/nodepool_test.go index 01b8146078..dff4b1f3ca 100644 --- a/test/e2e/nodepool_test.go +++ b/test/e2e/nodepool_test.go @@ -108,6 +108,14 @@ func TestNodePool(t *testing.T) { name: "KubeKubeVirtJsonPatchTest", test: NewKubeKubeVirtJsonPatchTest(ctx, mgtClient, hostedCluster), }, + { + name: "KubeVirtNodeMultinetTest", + test: NewKubeVirtMultinetTest(ctx, mgtClient, hostedCluster), + }, + { + name: "KubeVirtNodeSelectorTest", + test: NewKubeKubeVirtNodeSelectorTest(ctx, mgtClient, hostedCluster), + }, } for _, testCase := range nodePoolTests { @@ -234,27 +242,52 @@ func validateNodePoolConditions(t *testing.T, ctx context.Context, client crclie expectedConditions[hyperv1.NodePoolValidArchPlatform] = corev1.ConditionFalse } + t.Logf("validating status for nodepool %s/%s", nodePool.Namespace, nodePool.Name) start := time.Now() + previousResourceVersion := "" + previousConditions := map[string]hyperv1.NodePoolCondition{} err := wait.PollImmediateWithContext(ctx, 10*time.Second, 10*time.Minute, func(ctx context.Context) (bool, error) { if err := client.Get(ctx, crclient.ObjectKeyFromObject(nodePool), nodePool); err != nil { t.Logf("Failed to get nodepool: %v", err) return false, nil } - for _, condition := range nodePool.Status.Conditions { + if nodePool.ResourceVersion == previousResourceVersion { + // nothing's changed since the last time we checked + return false, nil + } + previousResourceVersion = nodePool.ResourceVersion + + currentConditions := map[string]hyperv1.NodePoolCondition{} + conditionsValid := 
true + for i, condition := range nodePool.Status.Conditions { expectedStatus, known := expectedConditions[condition.Type] if !known { return false, fmt.Errorf("unknown condition %s", condition.Type) } + conditionsValid = conditionsValid && (condition.Status == expectedStatus) + currentConditions[condition.Type] = nodePool.Status.Conditions[i] + if conditionsIdentical(currentConditions[condition.Type], previousConditions[condition.Type]) { + // no need to spam anything, we already said it when we processed this last time + continue + } + prefix := "" if condition.Status != expectedStatus { - t.Logf("condition %s status [%s] doesn't match the expected status [%s]", condition.Type, condition.Status, expectedStatus) - return false, nil + prefix = "in" + } + msg := fmt.Sprintf("%scorrect condition: wanted %s=%s, got %s=%s", prefix, condition.Type, expectedStatus, condition.Type, condition.Status) + if condition.Reason != "" { + msg += ": " + condition.Reason + } + if condition.Message != "" { + msg += "(" + condition.Message + ")" } - t.Logf("observed condition %s status to match expected stauts [%s]", condition.Type, expectedStatus) + t.Log(msg) } + previousConditions = currentConditions - return true, nil + return conditionsValid, nil }) duration := time.Since(start).Round(time.Second) @@ -263,3 +296,7 @@ func validateNodePoolConditions(t *testing.T, ctx context.Context, client crclie } t.Logf("Successfully validated all expected NodePool conditions in %s", duration) } + +func conditionsIdentical(a, b hyperv1.NodePoolCondition) bool { + return a.Type == b.Type && a.Status == b.Status && a.Reason == b.Reason && a.Message == b.Message +} diff --git a/test/e2e/util/hypershift_framework.go b/test/e2e/util/hypershift_framework.go index f155cbea05..4a4e5c28e5 100644 --- a/test/e2e/util/hypershift_framework.go +++ b/test/e2e/util/hypershift_framework.go @@ -143,13 +143,6 @@ func (h *hypershiftTest) postTeardown(hostedCluster *hyperv1.HostedCluster, opts } 
h.Run("PostTeardown", func(t *testing.T) { - // All clusters created during tests should ultimately conform to our API - // budget. This should be checked after deletion to ensure that API operations - // for the full lifecycle are accounted for. - if !opts.SkipAPIBudgetVerification { - EnsureAPIBudget(t, h.ctx, h.client, hostedCluster) - } - ValidateMetrics(t, h.ctx, hostedCluster, []string{ hcmetrics.WaitingInitialAvailabilityDurationMetricName, hcmetrics.InitialRollingOutDurationMetricName, diff --git a/vendor/github.com/openshift/library-go/test/library/metrics/query.go b/test/e2e/util/query.go similarity index 68% rename from vendor/github.com/openshift/library-go/test/library/metrics/query.go rename to test/e2e/util/query.go index 8a993215f8..61d221ad01 100644 --- a/vendor/github.com/openshift/library-go/test/library/metrics/query.go +++ b/test/e2e/util/query.go @@ -1,4 +1,7 @@ -package metrics +package util + +// This file is mirrored from library-go +// https://raw.githubusercontent.com/openshift/library-go/master/test/library/metrics/query.go import ( "context" @@ -7,60 +10,50 @@ import ( "fmt" "net" "net/http" - "strings" "time" routeclient "github.com/openshift/client-go/route/clientset/versioned" prometheusapi "github.com/prometheus/client_golang/api" prometheusv1 "github.com/prometheus/client_golang/api/prometheus/v1" - corev1 "k8s.io/api/core/v1" + authenticationv1 "k8s.io/api/authentication/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/transport" ) -// NewPrometheusClient returns Prometheus API or error +// newPrometheusClient returns Prometheus API or error // Note: with thanos-querier you must pass an entire Alert as a query. Partial queries return an error, so have to pass the entire alert. 
// Example query for an Alert: // `ALERTS{alertname="PodDisruptionBudgetAtLimit",alertstate="pending",namespace="pdbnamespace",poddisruptionbudget="pdbname",prometheus="openshift-monitoring/k8s",service="kube-state-metrics",severity="warning"}==1` // Example query: // `scheduler_scheduling_duration_seconds_sum` -func NewPrometheusClient(ctx context.Context, kclient kubernetes.Interface, rc routeclient.Interface) (prometheusv1.API, error) { +func newPrometheusClient(ctx context.Context, kclient kubernetes.Interface, rc routeclient.Interface) (prometheusv1.API, error) { _, err := kclient.CoreV1().Services("openshift-monitoring").Get(ctx, "prometheus-k8s", metav1.GetOptions{}) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get prometheus-k8s service: %w", err) } route, err := rc.RouteV1().Routes("openshift-monitoring").Get(ctx, "thanos-querier", metav1.GetOptions{}) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get thanos-querier route: %w", err) } host := route.Status.Ingress[0].Host - var bearerToken string - secrets, err := kclient.CoreV1().Secrets("openshift-monitoring").List(ctx, metav1.ListOptions{}) + expirationSeconds := int64(24 * time.Hour / time.Second) + req, err := kclient.CoreV1().ServiceAccounts("openshift-monitoring").CreateToken(ctx, "prometheus-k8s", + &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ExpirationSeconds: &expirationSeconds}, + }, metav1.CreateOptions{}) if err != nil { - return nil, fmt.Errorf("could not list secrets in openshift-monitoring namespace") - } - for _, s := range secrets.Items { - if s.Type != corev1.SecretTypeServiceAccountToken || - !strings.HasPrefix(s.Name, "prometheus-k8s") { - continue - } - bearerToken = string(s.Data[corev1.ServiceAccountTokenKey]) - break - } - if len(bearerToken) == 0 { - return nil, fmt.Errorf("prometheus service account not found") + return nil, fmt.Errorf("error requesting token for service account prometheus-k8s: %v", 
err) } - - return createClient(ctx, kclient, host, bearerToken) + return createClient(ctx, kclient, host, req.Status.Token) } func createClient(ctx context.Context, kclient kubernetes.Interface, host, bearerToken string) (prometheusv1.API, error) { // retrieve router CA routerCAConfigMap, err := kclient.CoreV1().ConfigMaps("openshift-config-managed").Get(ctx, "default-ingress-cert", metav1.GetOptions{}) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get route CA: %w", err) } bundlePEM := []byte(routerCAConfigMap.Data["ca-bundle.crt"]) @@ -88,7 +81,8 @@ func createClient(ctx context.Context, kclient kubernetes.Interface, host, beare ), }) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to create Prometheus API client: %w", err) } + return prometheusv1.NewAPI(client), nil } diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go index 9cbd8d4793..e96ae63cdb 100644 --- a/test/e2e/util/util.go +++ b/test/e2e/util/util.go @@ -24,20 +24,14 @@ import ( suppconfig "github.com/openshift/hypershift/support/config" "github.com/openshift/hypershift/support/util" support "github.com/openshift/hypershift/support/util" - "github.com/openshift/library-go/test/library/metrics" - promapi "github.com/prometheus/client_golang/api" prometheusv1 "github.com/prometheus/client_golang/api/prometheus/v1" - promv1 "github.com/prometheus/client_golang/api/prometheus/v1" - promconfig "github.com/prometheus/common/config" "github.com/prometheus/common/model" - prommodel "github.com/prometheus/common/model" "go.uber.org/zap/zaptest" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" authenticationv1 "k8s.io/api/authentication/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - kapierror "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -176,6 +170,48 @@ func WaitForGuestClient(t *testing.T, 
ctx context.Context, client crclient.Clien return guestClient } +func GetGuestKubeconfigHost(t *testing.T, ctx context.Context, client crclient.Client, hostedCluster *hyperv1.HostedCluster) (string, error) { + guestKubeConfigSecretData, err := WaitForGuestKubeConfig(t, ctx, client, hostedCluster) + if err != nil { + return "", fmt.Errorf("couldn't get guest kubeconfig: %v", err) + } + + guestConfig, err := clientcmd.RESTConfigFromKubeConfig(guestKubeConfigSecretData) + if err != nil { + return "", fmt.Errorf("couldn't load guest kubeconfig: %v", err) + } + + host := guestConfig.Host + if len(host) == 0 { + return "", fmt.Errorf("guest kubeconfig host is empty") + } + return host, nil +} + +func WaitForGuestKubeconfigHostUpdate(t *testing.T, ctx context.Context, client crclient.Client, hostedCluster *hyperv1.HostedCluster, oldHost string) { + g := NewWithT(t) + waitTimeout := 30 * time.Minute + pollingInterval := 15 * time.Second + + t.Logf("Waiting for guest kubeconfig host update") + var newHost string + var getHostError error + err := wait.PollUntilContextTimeout(ctx, pollingInterval, waitTimeout, true, func(ctx context.Context) (done bool, err error) { + newHost, getHostError = GetGuestKubeconfigHost(t, ctx, client, hostedCluster) + if getHostError != nil { + t.Logf("failed to get guest kubeconfig host: %v", getHostError) + return false, nil + } + if newHost == oldHost { + t.Logf("guest kubeconfig host is not yet updated, keep polling") + return false, nil + } + return true, nil + }) + g.Expect(err).NotTo(HaveOccurred(), "failed to wait for guest kubeconfig host update") + t.Logf("Guest kubeconfig host switched from %s to %s", oldHost, newHost) +} + func WaitForNReadyNodes(t *testing.T, ctx context.Context, client crclient.Client, n int32, platform hyperv1.PlatformType) []corev1.Node { g := NewWithT(t) start := time.Now() @@ -409,6 +445,29 @@ func WaitForNodePoolDesiredNodes(t *testing.T, ctx context.Context, client crcli func EnsureNoCrashingPods(t *testing.T, 
ctx context.Context, client crclient.Client, hostedCluster *hyperv1.HostedCluster) { t.Run("EnsureNoCrashingPods", func(t *testing.T) { + + var crashToleration int32 + + switch hostedCluster.Spec.Platform.Type { + case hyperv1.KubevirtPlatform: + kvPlatform := hostedCluster.Spec.Platform.Kubevirt + // External infra can be slow at times due to the nested nature + // of how external infra is tested within a kubevirt hcp running + // within baremetal ocp. Occasionally pods will fail with + // "Error: context deadline exceeded" reported by the kubelet. This + // seems to be an infra issue with etcd latency within the external + // infra test environment. Tolerating a single restart for random + // components helps. + // + // This toleration is not used for the default local HCP KubeVirt, + // only external infra + if kvPlatform != nil && kvPlatform.Credentials != nil { + crashToleration = 1 + } + default: + crashToleration = 0 + } + namespace := manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name) var podList corev1.PodList @@ -434,7 +493,7 @@ func EnsureNoCrashingPods(t *testing.T, ctx context.Context, client crclient.Cli } for _, containerStatus := range pod.Status.ContainerStatuses { - if containerStatus.RestartCount > 0 { + if containerStatus.RestartCount > crashToleration { t.Errorf("Container %s in pod %s has a restartCount > 0 (%d)", containerStatus.Name, pod.Name, containerStatus.RestartCount) } } @@ -541,188 +600,6 @@ func EnsureMachineDeploymentGeneration(t *testing.T, ctx context.Context, hostCl }) } -func EnsurePSANotPrivileged(t *testing.T, ctx context.Context, guestClient crclient.Client) { - t.Run("EnsurePSANotPrivileged", func(t *testing.T) { - testNamespaceName := "e2e-psa-check" - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: testNamespaceName, - }, - } - if err := guestClient.Create(ctx, namespace); err != nil { - t.Fatalf("failed to create namespace: %v", err) - } - pod := &corev1.Pod{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: "test-pod", - Namespace: testNamespaceName, - }, - Spec: corev1.PodSpec{ - NodeSelector: map[string]string{ - "e2e.openshift.io/unschedulable": "should-not-run", - }, - Containers: []corev1.Container{ - {Name: "first", Image: "something-innocuous"}, - }, - HostPID: true, // enforcement of restricted or baseline policy should reject this - }, - } - err := guestClient.Create(ctx, pod) - if err == nil { - t.Errorf("pod admitted when rejection was expected") - } - if !kapierror.IsForbidden(err) { - t.Errorf("forbidden error expected, got %s", err) - } - }) -} - -func EnsureAPIBudget(t *testing.T, ctx context.Context, client crclient.Client, hostedCluster *hyperv1.HostedCluster) { - t.Run("EnsureAPIBudget", func(t *testing.T) { - - // Get hypershift-operator token - operatorServiceAccount := &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: "operator", - Namespace: "hypershift", - }, - } - if err := client.Get(ctx, crclient.ObjectKeyFromObject(operatorServiceAccount), operatorServiceAccount); err != nil { - t.Fatalf("failed to get hypershift operator service account: %v", err) - } - var secretName string - for _, secret := range operatorServiceAccount.Secrets { - if strings.HasPrefix(secret.Name, "operator-token-") { - secretName = secret.Name - break - } - } - - token, err := getPrometheusToken(ctx, secretName, client) - if err != nil { - t.Fatalf("can't get token for Prometheus; %v", err) - } - - // Get thanos-querier endpoint - promRoute := &routev1.Route{ - ObjectMeta: metav1.ObjectMeta{ - Name: "thanos-querier", - Namespace: "openshift-monitoring", - }, - } - if err = client.Get(ctx, crclient.ObjectKeyFromObject(promRoute), promRoute); err != nil { - t.Skip("unable to get prometheus route, skipping") - } - if len(promRoute.Status.Ingress) == 0 { - t.Skip("unable to get prometheus ingress, skipping") - } - promEndpoint := fmt.Sprintf("https://%s", promRoute.Status.Ingress[0].Host) - - // Create prometheus client 
- cfg := promconfig.HTTPClientConfig{ - Authorization: &promconfig.Authorization{ - Type: "Bearer", - Credentials: promconfig.Secret(token), - }, - TLSConfig: promconfig.TLSConfig{ - InsecureSkipVerify: true, - }, - } - rt, err := promconfig.NewRoundTripperFromConfig(cfg, "e2e-budget-checker") - if err != nil { - t.Fatalf("failed to get create round tripper: %v", err) - } - promClient, err := promapi.NewClient(promapi.Config{ - Address: promEndpoint, - RoundTripper: rt, - }) - if err != nil { - t.Fatalf("failed to get create prometheus client: %v", err) - } - v1api := promv1.NewAPI(promClient) - - // Compare metrics against budgets - namespace := manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name) - clusterAgeMinutes := int32(time.Since(hostedCluster.CreationTimestamp.Time).Round(time.Minute).Minutes()) - budgets := []struct { - name string - query string - budget float64 - }{ - { - name: "control-plane-operator read", - query: fmt.Sprintf(`sum by (pod) (max_over_time(hypershift:controlplane:component_api_requests_total{app="control-plane-operator", method="GET", namespace=~"%s"}[%dm]))`, namespace, clusterAgeMinutes), - budget: 3000, - }, - { - name: "control-plane-operator mutate", - query: fmt.Sprintf(`sum by (pod) (max_over_time(hypershift:controlplane:component_api_requests_total{app="control-plane-operator", method!="GET", namespace=~"%s"}[%dm]))`, namespace, clusterAgeMinutes), - budget: 3000, - }, - { - name: "control-plane-operator no 404 deletes", - query: fmt.Sprintf(`sum by (pod) (max_over_time(hypershift:controlplane:component_api_requests_total{app="control-plane-operator", method="DELETE", code="404", namespace=~"%s"}[%dm]))`, namespace, clusterAgeMinutes), - budget: 50, - }, - // { - // name: "ignition-server p90 payload generation time", - // query: fmt.Sprintf(`sum by (namespace) (max_over_time(hypershift:controlplane:ign_payload_generation_seconds_p90{namespace="%s"}[%dm]))`, namespace, clusterAgeMinutes), - // 
budget: 45, - // }, - // hypershift-operator budget can not be per HC so metric will be - // significantly under budget for all but the last test(s) to complete on - // a particular test cluster These budgets will also need to scale up with - // additional tests that create HostedClusters - { - name: "hypershift-operator read", - query: `sum(hypershift:operator:component_api_requests_total{method="GET"})`, - budget: 5000, - }, - { - name: "hypershift-operator mutate", - query: `sum(hypershift:operator:component_api_requests_total{method!="GET"})`, - budget: 20000, - }, - { - name: "hypershift-operator no 404 deletes", - query: `sum(hypershift:operator:component_api_requests_total{method="DELETE", code="404"})`, - budget: 50, - }, - } - - for _, budget := range budgets { - t.Run(budget.name, func(t *testing.T) { - result, _, err := v1api.Query(ctx, budget.query, time.Now()) - if err != nil { - t.Fatalf("failed to query prometheus: %v", err) - } - vector, ok := result.(prommodel.Vector) - if !ok { - t.Fatal("expected vector result") - } - if len(vector) == 0 { - if budget.budget <= 50 { - t.Log("no samples returned for query with small budget, skipping check") - } else { - t.Errorf("no samples returned for query with large budget, failed check") - } - } - for _, sample := range vector { - podMsg := "" - if podName, ok := sample.Metric["pod"]; ok { - podMsg = fmt.Sprintf("pod %s ", podName) - } - if float64(sample.Value) > budget.budget { - t.Errorf("%sover budget: budget: %.0f, actual: %.0f", podMsg, budget.budget, sample.Value) - } else { - t.Logf("%swithin budget: budget: %.0f, actual: %.0f", podMsg, budget.budget, sample.Value) - } - } - }) - } - }) -} - func EnsureAllRoutesUseHCPRouter(t *testing.T, ctx context.Context, hostClient crclient.Client, hostedCluster *hyperv1.HostedCluster) { t.Run("EnsureAllRoutesUseHCPRouter", func(t *testing.T) { for _, svc := range hostedCluster.Spec.Services { @@ -763,6 +640,7 @@ func EnsureNetworkPolicies(t *testing.T, ctx 
context.Context, c crclient.Client, "cluster-node-tuning-operator", "capi-provider-controller-manager", "cluster-api", + "etcd", // For etcd-defrag leader elections "control-plane-operator", "control-plane-pki-operator", "hosted-cluster-config-operator", @@ -802,14 +680,9 @@ func EnsureNetworkPolicies(t *testing.T, ctx context.Context, c crclient.Client, } // Validate cluster-version-operator is not allowed to access management KAS. - stdOut, err := RunCommandInPod(ctx, c, "cluster-version-operator", hcpNamespace, command, "cluster-version-operator") + _, err = RunCommandInPod(ctx, c, "cluster-version-operator", hcpNamespace, command, "cluster-version-operator") g.Expect(err).To(HaveOccurred()) - // Expect curl to timeout https://curl.se/docs/manpage.html (exit code 28). - if err != nil && !strings.Contains(err.Error(), "command terminated with exit code 28") { - t.Errorf("cluster version pod was unexpectedly allowed to reach the management KAS. stdOut: %s. stdErr: %s", stdOut, err.Error()) - } - // Validate private router is not allowed to access management KAS. if hostedCluster.Spec.Platform.Type == hyperv1.AWSPlatform { if hostedCluster.Spec.Platform.AWS.EndpointAccess != hyperv1.Private { @@ -817,18 +690,13 @@ func EnsureNetworkPolicies(t *testing.T, ctx context.Context, c crclient.Client, // === CONT TestCreateClusterPrivate/EnsureHostedCluster/EnsureNetworkPolicies/EnsureLimitedEgressTrafficToManagementKAS // util.go:851: private router pod was unexpectedly allowed to reach the management KAS. stdOut: . 
stdErr: Internal error occurred: error executing command in container: container is not created or running // Should be solve with https://issues.redhat.com/browse/HOSTEDCP-1200 - stdOut, err := RunCommandInPod(ctx, c, "private-router", hcpNamespace, command, "private-router") + _, err := RunCommandInPod(ctx, c, "private-router", hcpNamespace, command, "private-router") g.Expect(err).To(HaveOccurred()) - - // Expect curl to timeout https://curl.se/docs/manpage.html (exit code 28). - if err != nil && !strings.Contains(err.Error(), "command terminated with exit code 28") { - t.Errorf("private router pod was unexpectedly allowed to reach the management KAS. stdOut: %s. stdErr: %s", stdOut, err.Error()) - } } } // Validate cluster api is allowed to access management KAS. - stdOut, err = RunCommandInPod(ctx, c, "cluster-api", hcpNamespace, command, "manager") + stdOut, err := RunCommandInPod(ctx, c, "cluster-api", hcpNamespace, command, "manager") // Expect curl return a 403 from the KAS. if !strings.Contains(stdOut, "HTTP/2 403") || err != nil { t.Errorf("cluster api pod was unexpectedly not allowed to reach the management KAS. stdOut: %s. 
stdErr: %s", stdOut, err.Error()) @@ -1146,7 +1014,7 @@ func NewPrometheusClient(ctx context.Context) (prometheusv1.API, error) { if err != nil { panic(err) } - prometheusClient, err := metrics.NewPrometheusClient(ctx, kubeClient, routeClient) + prometheusClient, err := newPrometheusClient(ctx, kubeClient, routeClient) if err != nil { panic(err) } @@ -1186,6 +1054,7 @@ func EnsurePodsWithEmptyDirPVsHaveSafeToEvictAnnotations(t *testing.T, ctx conte auditedAppList := map[string]string{ "cloud-controller-manager": "app", + "cloud-credential-operator": "app", "aws-ebs-csi-driver-controller": "app", "capi-provider-controller-manager": "app", "cloud-network-config-controller": "app", @@ -1423,7 +1292,7 @@ func ValidatePrivateCluster(t *testing.T, ctx context.Context, client crclient.C WaitForNodePoolDesiredNodes(t, ctx, client, hostedCluster) numNodes := clusterOpts.NodePoolReplicas * int32(len(clusterOpts.AWSPlatform.Zones)) - // rollout will not complete if there are no wroker nodes. + // rollout will not complete if there are no worker nodes. if numNodes > 0 { // Wait for the rollout to be complete t.Logf("Waiting for cluster rollout. 
Image: %s", clusterOpts.ReleaseImage) @@ -1435,8 +1304,6 @@ func ValidatePrivateCluster(t *testing.T, ctx context.Context, client crclient.C serviceStrategy := util.ServicePublishingStrategyByTypeByHC(hostedCluster, hyperv1.APIServer) g.Expect(serviceStrategy).ToNot(BeNil()) - // Private clusters should always use Route - g.Expect(serviceStrategy.Type).To(Equal(hyperv1.Route)) if serviceStrategy.Route != nil && serviceStrategy.Route.Hostname != "" { g.Expect(hostedCluster.Status.ControlPlaneEndpoint.Host).To(Equal(serviceStrategy.Route.Hostname)) } else { @@ -1480,20 +1347,37 @@ func validateHostedClusterConditions(t *testing.T, ctx context.Context, client c if hostedCluster.Spec.Platform.Type == hyperv1.KubevirtPlatform && hostedCluster.Spec.Networking.NetworkType == hyperv1.OVNKubernetes { - expectedConditions[hyperv1.ValidKubeVirtInfraNetworkMTU] = metav1.ConditionTrue + if hostedCluster.Annotations[hyperv1.ManagementPlatformAnnotation] == string(hyperv1.AWSPlatform) { + // AWS platform supports Jumbo frames + expectedConditions[hyperv1.ValidKubeVirtInfraNetworkMTU] = metav1.ConditionTrue + } else if hostedCluster.Annotations[hyperv1.ManagementPlatformAnnotation] == string(hyperv1.AzurePlatform) { + // Azure platform doesn't support Jumbo frames + expectedConditions[hyperv1.ValidKubeVirtInfraNetworkMTU] = metav1.ConditionFalse + } } + t.Logf("validating status for hostedcluster %s/%s", hostedCluster.Namespace, hostedCluster.Name) start := time.Now() + previousResourceVersion := "" + previousConditions := map[string]metav1.Condition{} err := wait.PollImmediateWithContext(ctx, 10*time.Second, 10*time.Minute, func(ctx context.Context) (bool, error) { if err := client.Get(ctx, crclient.ObjectKeyFromObject(hostedCluster), hostedCluster); err != nil { t.Logf("Failed to get hostedcluster: %v", err) return false, nil } - for _, condition := range hostedCluster.Status.Conditions { + if hostedCluster.ResourceVersion == previousResourceVersion { + // nothing's changed 
since the last time we checked + return false, nil + } + previousResourceVersion = hostedCluster.ResourceVersion + + currentConditions := map[string]metav1.Condition{} + conditionsValid := true + for i, condition := range hostedCluster.Status.Conditions { if condition.Type == string(hyperv1.ClusterVersionUpgradeable) { // ClusterVersionUpgradeable condition status is not always guranteed to be true, skip. - t.Logf("observed condition %s status [%s]", condition.Type, condition.Status) + t.Logf("unchecked condition: %s", formatCondition(condition)) continue } @@ -1502,14 +1386,23 @@ func validateHostedClusterConditions(t *testing.T, ctx context.Context, client c return false, fmt.Errorf("unknown condition %s", condition.Type) } + conditionsValid = conditionsValid && (condition.Status == expectedStatus) + + currentConditions[condition.Type] = hostedCluster.Status.Conditions[i] + if conditionsIdentical(currentConditions[condition.Type], previousConditions[condition.Type]) { + // no need to spam anything, we already said it when we processed this last time + continue + } + prefix := "" if condition.Status != expectedStatus { - t.Logf("condition %s status [%s] doesn't match the expected status [%s]", condition.Type, condition.Status, expectedStatus) - return false, nil + prefix = "in" } - t.Logf("observed condition %s status to match expected stauts [%s]", condition.Type, expectedStatus) + msg := fmt.Sprintf("%scorrect condition: wanted %s=%s, got %s", prefix, condition.Type, expectedStatus, formatCondition(condition)) + t.Log(msg) } + previousConditions = currentConditions - return true, nil + return conditionsValid, nil }) duration := time.Since(start).Round(time.Second) @@ -1519,6 +1412,21 @@ func validateHostedClusterConditions(t *testing.T, ctx context.Context, client c t.Logf("Successfully validated all expected HostedCluster conditions in %s", duration) } +func formatCondition(condition metav1.Condition) string { + msg := fmt.Sprintf("%s=%s", condition.Type, 
condition.Status) + if condition.Reason != "" { + msg += ": " + condition.Reason + } + if condition.Message != "" { + msg += "(" + condition.Message + ")" + } + return msg +} + +func conditionsIdentical(a, b metav1.Condition) bool { + return a.Type == b.Type && a.Status == b.Status && a.Reason == b.Reason && a.Message == b.Message +} + func EnsureHCPPodsAffinitiesAndTolerations(t *testing.T, ctx context.Context, client crclient.Client, hostedCluster *hyperv1.HostedCluster) { t.Run("EnsureHCPPodsAffinitiesAndTolerations", func(t *testing.T) { namespace := manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name) diff --git a/test/integration/README.md b/test/integration/README.md new file mode 100644 index 0000000000..6fa5edd972 --- /dev/null +++ b/test/integration/README.md @@ -0,0 +1,63 @@ +# `kind`-Based Integration Testing + +This test framework has a two-fold goal: provide a short iterating cycle for work on HyperShift operators locally +as well as a quick validation harness for features that do not require cloud-provider-specific functionality. + +## Local Operation + +The `run.sh` script allows for local iteration on HyperShift - use a shell to set up the environment and keep it +running, while using other shells to interact with the environment or even iterate on tests. + +### Prerequisites + +Make sure you have `kind` and some container image building utility (`docker`, `buildah`, `podman`) installed. +Keep an eye out for `too many open files` errors when launching `HostedCluster`s and apply the [remedy](https://kind.sigs.k8s.io/docs/user/known-issues/#pod-errors-due-to-too-many-open-files). + +An image is built by copying local binaries into a base container. Ensure the script knows how to map your +local operating system to a host container image, or you will have issues with dynamic linking. 
+ +Visit the [web console](https://console.redhat.com/openshift/create/local) to create a local pull secret - +this is required to interrogate OCP release bundles. + +Set up the following environment variables: + +```shell +export PATH="${PATH}:$(realpath ./bin)" +export WORK_DIR=/tmp/integration # this directory is persistent between runs, cleared only as necessary +export PULL_SECRET="REPLACE-ME" # point this environment variable at the pull secret you generated +``` + +### Setup + +Run the following in a shell - the process will set up the `kind` cluster and requisite `hypershift` infrastructure, +then wait for `SIGINT` indefinitely. On interrupt, the process will clean up after itself. + +# TODO: add some mechanism to choose which tests the setup runs for +```shell +./test/integration/run.sh \ + cluster-up \ # start the kind cluster + image \ # build the container image for HyperShift, load it into the cluster + setup # start the HyperShift operator and any HostedClusters for selected tests +``` + +### Tests + +Run the following for quick iteration - the test process will expect that setup is complete. Use `${GO_TEST_FLAGS}` to +specify what subset of the tests to run. + +```shell +./test/integration/run.sh \ + test # run tests +``` + +### Refreshing Image Content + +When you've made changes to the HyperShift codebase and need to re-deploy the operators, run the following - +a new image will be built, loaded into the cluster, and all `Pod`s deploying the image will be deleted, so +the new image is picked up on restart. 
+ +```shell +./test/integration/run.sh \ + image \ # build the container image for HyperShift, load it into the cluster + reload # power-cycle all pods that should be running the image +``` \ No newline at end of file diff --git a/test/integration/control_plane_pki_operator.go b/test/integration/control_plane_pki_operator.go new file mode 100644 index 0000000000..8b10f343fa --- /dev/null +++ b/test/integration/control_plane_pki_operator.go @@ -0,0 +1,339 @@ +//go:build integration || e2e + +package integration + +import ( + "context" + "crypto/sha256" + "fmt" + "math/big" + "strings" + "testing" + "time" + + certificatesv1alpha1 "github.com/openshift/hypershift/api/certificates/v1alpha1" + certificatesv1alpha1applyconfigurations "github.com/openshift/hypershift/client/applyconfiguration/certificates/v1alpha1" + authenticationv1 "k8s.io/api/authentication/v1" + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + certificatesv1applyconfigurations "k8s.io/client-go/applyconfigurations/certificates/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + controllerruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + + hypershiftv1beta1 "github.com/openshift/hypershift/api/hypershift/v1beta1" + "github.com/openshift/hypershift/control-plane-pki-operator/certificates" + pkimanifests "github.com/openshift/hypershift/control-plane-pki-operator/manifests" + "github.com/openshift/hypershift/hypershift-operator/controllers/manifests" + "github.com/openshift/hypershift/test/integration/framework" +) + +func RunTestControlPlanePKIOperatorBreakGlassCredentials(t *testing.T, ctx context.Context, hostedCluster *hypershiftv1beta1.HostedCluster, mgmt, guest *framework.Clients) { + t.Run("break-glass-credentials", func(t *testing.T) { + hostedControlPlaneNamespace := 
manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name) + + for _, testCase := range []struct { + clientCertificate *corev1.Secret + signer certificates.SignerClass + }{ + { + clientCertificate: pkimanifests.CustomerSystemAdminClientCertSecret(hostedControlPlaneNamespace), + signer: certificates.CustomerBreakGlassSigner, + }, + { + clientCertificate: pkimanifests.SRESystemAdminClientCertSecret(hostedControlPlaneNamespace), + signer: certificates.SREBreakGlassSigner, + }, + } { + testCase := testCase + t.Run(string(testCase.signer), func(t *testing.T) { + t.Parallel() + t.Run("direct fetch", func(t *testing.T) { + t.Logf("Grabbing break-glass credentials from client certificate secret %s/%s", testCase.clientCertificate.Namespace, testCase.clientCertificate.Name) + if err := wait.PollUntilContextTimeout(ctx, 1*time.Second, 3*time.Minute, true, func(ctx context.Context) (done bool, err error) { + getErr := mgmt.CRClient.Get(ctx, controllerruntimeclient.ObjectKeyFromObject(testCase.clientCertificate), testCase.clientCertificate) + if apierrors.IsNotFound(getErr) { + return false, nil + } + return getErr == nil, err + }); err != nil { + t.Fatalf("client cert didn't become available: %v", err) + } + + validateCertificateAuth(t, ctx, guest.Cfg, testCase.clientCertificate.Data["tls.crt"], testCase.clientCertificate.Data["tls.key"], func(s string) bool { + return strings.HasPrefix(s, certificates.CommonNamePrefix(testCase.signer)) + }) + }) + + t.Run("CSR flow", func(t *testing.T) { + t.Run("invalid CN flagged in status", func(t *testing.T) { + t.Skip("TODO(skuznets): enable these tests when we're running against KAS 1.29+") + validateInvalidCN(t, ctx, hostedCluster, mgmt, guest, testCase.signer) + }) + signedCrt := validateCSRFlow(t, ctx, hostedCluster, mgmt, guest, testCase.signer) + + t.Run("revocation", func(t *testing.T) { + validateRevocation(t, ctx, hostedCluster, mgmt, guest, testCase.signer, signedCrt) + }) + }) + }) + } + 
t.Run("independent signers", func(t *testing.T) { + t.Log("generating new break-glass credentials for more than one signer") + customerSignedCrt := validateCSRFlow(t, ctx, hostedCluster, mgmt, guest, certificates.CustomerBreakGlassSigner) + sreSignedCrt := validateCSRFlow(t, ctx, hostedCluster, mgmt, guest, certificates.SREBreakGlassSigner) + + t.Logf("revoking the %q signer", certificates.CustomerBreakGlassSigner) + validateRevocation(t, ctx, hostedCluster, mgmt, guest, certificates.CustomerBreakGlassSigner, customerSignedCrt) + + t.Logf("ensuring the break-glass credentials from %q signer still work", certificates.SREBreakGlassSigner) + _, sreKey, _, _ := framework.CertKeyRequest(t, certificates.SREBreakGlassSigner) + validateCertificateAuth(t, ctx, guest.Cfg, sreSignedCrt, sreKey, func(s string) bool { + return s == framework.CommonNameFor(certificates.SREBreakGlassSigner) + }) + }) + }) +} + +func base36sum224(data []byte) string { + hash := sha256.Sum224(data) + var i big.Int + i.SetBytes(hash[:]) + return i.Text(36) +} + +func clientForCertKey(t *testing.T, root *rest.Config, crt, key []byte) *kubernetes.Clientset { + t.Log("amending the existing kubeconfig to use break-glass client certificate credentials") + certConfig := rest.AnonymousClientConfig(root) + certConfig.TLSClientConfig.CertData = crt + certConfig.TLSClientConfig.KeyData = key + + breakGlassTenantClient, err := kubernetes.NewForConfig(certConfig) + if err != nil { + t.Fatalf("could not create client: %v", err) + } + + return breakGlassTenantClient +} + +func validateCertificateAuth(t *testing.T, ctx context.Context, root *rest.Config, crt, key []byte, usernameValid func(string) bool) { + t.Log("validating that the client certificate provides the appropriate access") + breakGlassTenantClient := clientForCertKey(t, root, crt, key) + + t.Log("issuing SSR to identify the subject we are given using the client certificate") + response, err := 
breakGlassTenantClient.AuthenticationV1().SelfSubjectReviews().Create(ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("could not send SSR: %v", err) + } + + t.Log("ensuring that the SSR identifies the client certificate as having system:masters power and correct username") + if !sets.New[string](response.Status.UserInfo.Groups...).Has("system:masters") || + !usernameValid(response.Status.UserInfo.Username) { + t.Fatalf("did not get correct SSR response: %#v", response) + } +} + +func validateInvalidCN(t *testing.T, ctx context.Context, hostedCluster *hypershiftv1beta1.HostedCluster, mgmt, guest *framework.Clients, signer certificates.SignerClass) { + hostedControlPlaneNamespace := manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name) + _, _, _, wrongCsr := framework.CertKeyRequest(t, signer) + signerName := certificates.SignerNameForHC(hostedCluster, signer) + wrongCSRName := base36sum224(append(append([]byte(hostedControlPlaneNamespace), []byte(signer)...), []byte(t.Name())...)) + t.Logf("creating invalid CSR %q for signer %q, requesting client auth usages", wrongCSRName, signerName) + wrongCSRCfg := certificatesv1applyconfigurations.CertificateSigningRequest(wrongCSRName) + wrongCSRCfg.Spec = certificatesv1applyconfigurations.CertificateSigningRequestSpec(). + WithSignerName(signerName). + WithRequest(wrongCsr...). 
+ WithUsages(certificatesv1.UsageClientAuth) + if _, err := mgmt.KubeClient.CertificatesV1().CertificateSigningRequests().Apply(ctx, wrongCSRCfg, metav1.ApplyOptions{FieldManager: "e2e-test"}); err != nil { + t.Fatalf("failed to create CSR: %v", err) + } + + t.Logf("creating CSRA %s/%s to trigger automatic approval of the CSR", hostedControlPlaneNamespace, wrongCSRName) + wrongCSRACfg := certificatesv1alpha1applyconfigurations.CertificateSigningRequestApproval(wrongCSRName, hostedControlPlaneNamespace) + if _, err := mgmt.HyperShiftClient.CertificatesV1alpha1().CertificateSigningRequestApprovals(hostedControlPlaneNamespace).Apply(ctx, wrongCSRACfg, metav1.ApplyOptions{FieldManager: "e2e-test"}); err != nil { + t.Fatalf("failed to create CSRA: %v", err) + } + + t.Logf("waiting for CSR %q to have invalid CN exposed in status", wrongCSRName) + var lastResourceVersion string + lastTimestamp := time.Now() + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 10*time.Minute, true, func(ctx context.Context) (done bool, err error) { + csr, err := mgmt.KubeClient.CertificatesV1().CertificateSigningRequests().Get(ctx, wrongCSRName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + t.Logf("CSR %q does not exist yet", wrongCSRName) + return false, nil + } + if err != nil && !apierrors.IsNotFound(err) { + return true, err + } + var markedInvalid bool + if csr.ObjectMeta.ResourceVersion != lastResourceVersion { + t.Logf("CSR %q observed at RV %s after %s", wrongCSRName, csr.ObjectMeta.ResourceVersion, time.Since(lastTimestamp)) + for _, condition := range csr.Status.Conditions { + if condition.Type == certificatesv1.CertificateFailed && + condition.Status == corev1.ConditionTrue && + condition.Reason == "SignerValidationFailure" { + markedInvalid = true + } + msg := fmt.Sprintf("%s=%s", condition.Type, condition.Status) + if condition.Reason != "" { + msg += ": " + condition.Reason + } + if condition.Message != "" { + msg += "(" + condition.Message + ")" + } 
+ t.Logf("CSR %q status: %s", csr.Name, msg) + } + lastResourceVersion = csr.ObjectMeta.ResourceVersion + lastTimestamp = time.Now() + } + + if markedInvalid { + return true, nil + } + + return false, nil + }); err != nil { + t.Fatalf("never saw CSR marked as invalid: %v", err) + } +} + +func validateCSRFlow(t *testing.T, ctx context.Context, hostedCluster *hypershiftv1beta1.HostedCluster, mgmt, guest *framework.Clients, signer certificates.SignerClass) []byte { + hostedControlPlaneNamespace := manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name) + _, key, csr, _ := framework.CertKeyRequest(t, signer) + signerName := certificates.SignerNameForHC(hostedCluster, signer) + csrName := base36sum224(append(append([]byte(hostedControlPlaneNamespace), []byte(signer)...), []byte(t.Name())...)) + t.Logf("creating CSR %q for signer %q, requesting client auth usages", csrName, signer) + csrCfg := certificatesv1applyconfigurations.CertificateSigningRequest(csrName) + csrCfg.Spec = certificatesv1applyconfigurations.CertificateSigningRequestSpec(). + WithSignerName(signerName). + WithRequest(csr...). 
+ WithUsages(certificatesv1.UsageClientAuth) + if _, err := mgmt.KubeClient.CertificatesV1().CertificateSigningRequests().Apply(ctx, csrCfg, metav1.ApplyOptions{FieldManager: "e2e-test"}); err != nil { + t.Fatalf("failed to create CSR: %v", err) + } + + t.Logf("creating CSRA %s/%s to trigger automatic approval of the CSR", hostedControlPlaneNamespace, csrName) + csraCfg := certificatesv1alpha1applyconfigurations.CertificateSigningRequestApproval(csrName, hostedControlPlaneNamespace) + if _, err := mgmt.HyperShiftClient.CertificatesV1alpha1().CertificateSigningRequestApprovals(hostedControlPlaneNamespace).Apply(ctx, csraCfg, metav1.ApplyOptions{FieldManager: "e2e-test"}); err != nil { + t.Fatalf("failed to create CSRA: %v", err) + } + + t.Logf("waiting for CSR %q to be approved and signed", csrName) + var signedCrt []byte + var lastResourceVersion string + lastTimestamp := time.Now() + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 10*time.Minute, true, func(ctx context.Context) (done bool, err error) { + csr, err := mgmt.KubeClient.CertificatesV1().CertificateSigningRequests().Get(ctx, csrName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + t.Logf("CSR %q does not exist yet", csrName) + return false, nil + } + if err != nil && !apierrors.IsNotFound(err) { + return true, err + } + if csr.ObjectMeta.ResourceVersion != lastResourceVersion { + t.Logf("CSR %q observed at RV %s after %s", csrName, csr.ObjectMeta.ResourceVersion, time.Since(lastTimestamp)) + for _, condition := range csr.Status.Conditions { + msg := fmt.Sprintf("%s=%s", condition.Type, condition.Status) + if condition.Reason != "" { + msg += ": " + condition.Reason + } + if condition.Message != "" { + msg += "(" + condition.Message + ")" + } + t.Logf("CSR %q status: %s", csr.Name, msg) + } + lastResourceVersion = csr.ObjectMeta.ResourceVersion + lastTimestamp = time.Now() + } + + if csr != nil && csr.Status.Certificate != nil { + signedCrt = csr.Status.Certificate + return 
true, nil + } + + return false, nil + }); err != nil { + t.Fatalf("never saw CSR fulfilled: %v", err) + } + + if len(signedCrt) == 0 { + t.Fatal("got a zero-length signed cert back") + } + + validateCertificateAuth(t, ctx, guest.Cfg, signedCrt, key, func(s string) bool { + return s == framework.CommonNameFor(signer) + }) + + return signedCrt +} + +func validateRevocation(t *testing.T, ctx context.Context, hostedCluster *hypershiftv1beta1.HostedCluster, mgmt, guest *framework.Clients, signer certificates.SignerClass, signedCrt []byte) { + if len(signedCrt) == 0 { + t.Fatalf("programmer error: zero-length signed cert but we haven't failed yet!") + } + + hostedControlPlaneNamespace := manifests.HostedControlPlaneNamespace(hostedCluster.Namespace, hostedCluster.Name) + _, key, _, _ := framework.CertKeyRequest(t, signer) + crrName := base36sum224(append(append([]byte(hostedControlPlaneNamespace), []byte(signer)...), []byte(t.Name())...)) + t.Logf("creating CRR %s/%s to trigger signer certificate revocation", hostedControlPlaneNamespace, crrName) + crrCfg := certificatesv1alpha1applyconfigurations.CertificateRevocationRequest(crrName, hostedControlPlaneNamespace). 
+ WithSpec(certificatesv1alpha1applyconfigurations.CertificateRevocationRequestSpec().WithSignerClass(string(signer))) + if _, err := mgmt.HyperShiftClient.CertificatesV1alpha1().CertificateRevocationRequests(hostedControlPlaneNamespace).Apply(ctx, crrCfg, metav1.ApplyOptions{FieldManager: "e2e-test"}); err != nil { + t.Fatalf("failed to create CRR: %v", err) + } + + t.Logf("waiting for CRR %s/%s to be fulfilled", hostedControlPlaneNamespace, crrName) + var lastResourceVersion string + lastTimestamp := time.Now() + if err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 10*time.Minute, true, func(ctx context.Context) (done bool, err error) { + crr, err := mgmt.HyperShiftClient.CertificatesV1alpha1().CertificateRevocationRequests(hostedControlPlaneNamespace).Get(ctx, crrName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + t.Logf("CRR %q does not exist yet", crrName) + return false, nil + } + if err != nil && !apierrors.IsNotFound(err) { + return true, err + } + var complete bool + if crr.ObjectMeta.ResourceVersion != lastResourceVersion { + t.Logf("CRR observed at RV %s after %s", crr.ObjectMeta.ResourceVersion, time.Since(lastTimestamp)) + for _, condition := range crr.Status.Conditions { + msg := fmt.Sprintf("%s=%s", condition.Type, condition.Status) + if condition.Reason != "" { + msg += ": " + condition.Reason + } + if condition.Message != "" { + msg += "(" + condition.Message + ")" + } + t.Logf("CRR status: %s", msg) + if condition.Type == certificatesv1alpha1.PreviousCertificatesRevokedType && condition.Status == metav1.ConditionTrue { + complete = true + } + } + lastResourceVersion = crr.ObjectMeta.ResourceVersion + lastTimestamp = time.Now() + } + if complete { + t.Log("CRR complete") + return true, nil + } + return false, nil + }); err != nil { + t.Fatalf("never saw CRR complete: %v", err) + } + + t.Logf("creating a client using a certificate from the revoked signer") + previousCertClient := clientForCertKey(t, guest.Cfg, signedCrt, 
key) + + t.Log("issuing SSR to confirm that we're not authorized to contact the server") + response, err := previousCertClient.AuthenticationV1().SelfSubjectReviews().Create(ctx, &authenticationv1.SelfSubjectReview{}, metav1.CreateOptions{}) + if !apierrors.IsUnauthorized(err) { + t.Fatalf("expected an unauthorized error, got %v, response %#v", err, response) + } +} diff --git a/test/integration/control_plane_pki_operator_test.go b/test/integration/control_plane_pki_operator_test.go new file mode 100644 index 0000000000..b944ab85e6 --- /dev/null +++ b/test/integration/control_plane_pki_operator_test.go @@ -0,0 +1,15 @@ +//go:build integration + +package integration + +import ( + "testing" + + "github.com/openshift/hypershift/test/integration/framework" +) + +func TestControlPlanePKIOperatorBreakGlassCredentials(t *testing.T) { + framework.Run(testContext, log, globalOpts, t, func(t *testing.T, testCtx *framework.TestContext) { + RunTestControlPlanePKIOperatorBreakGlassCredentials(t, testContext, testCtx.HostedCluster, testCtx.MgmtCluster, testCtx.GuestCluster) + }) +} diff --git a/test/integration/framework/artifact.go b/test/integration/framework/artifact.go new file mode 100644 index 0000000000..e56b68b6ba --- /dev/null +++ b/test/integration/framework/artifact.go @@ -0,0 +1,19 @@ +package framework + +import ( + "fmt" + "os" + "path/filepath" +) + +// Artifact opens relPath under the artifact dir, ensuring that owning directories exist. +// Closing the file is the responsibility of the caller. 
+func Artifact(opts *Options, relPath string) (*os.File, error) { + filePath := filepath.Join(opts.ArtifactDir, relPath) + base := filepath.Dir(filePath) + if err := os.MkdirAll(base, 0777); err != nil { + return nil, fmt.Errorf("couldn't ensure artifact directory: %w", err) + } + + return os.Create(filePath) +} diff --git a/test/integration/framework/assets/00-custom-resource-definition.yaml b/test/integration/framework/assets/00-custom-resource-definition.yaml new file mode 100644 index 0000000000..4ff57e35a3 --- /dev/null +++ b/test/integration/framework/assets/00-custom-resource-definition.yaml @@ -0,0 +1,1137 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/616 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: ingresscontrollers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: IngressController + listKind: IngressControllerList + plural: ingresscontrollers + singular: ingresscontroller + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources. \n When an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out. \n https://kubernetes.io/docs/concepts/services-networking/ingress-controllers \n Whenever possible, sensible defaults for the platform are used. See each field for more details. 
\n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the IngressController. + properties: + clientTLS: + description: clientTLS specifies settings for requesting and verifying client certificates, which can be used to enable mutual TLS for edge-terminated and reencrypt routes. + properties: + allowedSubjectPatterns: + description: allowedSubjectPatterns specifies a list of regular expressions that should be matched against the distinguished name on a valid client certificate to filter requests. The regular expressions must use PCRE syntax. If this list is empty, no filtering is performed. If the list is nonempty, then at least one pattern must match a client certificate's distinguished name or else the ingress controller rejects the certificate and denies the connection. + items: + type: string + type: array + x-kubernetes-list-type: atomic + clientCA: + description: clientCA specifies a configmap containing the PEM-encoded CA certificate bundle that should be used to verify a client's certificate. The administrator must create this configmap in the openshift-config namespace. 
+ properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + clientCertificatePolicy: + description: "clientCertificatePolicy specifies whether the ingress controller requires clients to provide certificates. This field accepts the values \"Required\" or \"Optional\". \n Note that the ingress controller only checks client certificates for edge-terminated and reencrypt TLS routes; it cannot check certificates for cleartext HTTP or passthrough TLS routes." + enum: + - "" + - Required + - Optional + type: string + required: + - clientCA + - clientCertificatePolicy + type: object + defaultCertificate: + description: "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used. \n The secret must contain the following keys and data: \n tls.crt: certificate file contents tls.key: key file contents \n If unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store. \n If a wildcard certificate is used and shared by multiple HTTP/2 enabled routes (which implies ALPN) then clients (i.e., notably browsers) are at liberty to reuse open connections. This means a client can reuse a connection to another route and that is likely to fail. This behaviour is generally known as connection coalescing. \n The in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server." + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + x-kubernetes-map-type: atomic + domain: + description: "domain is a DNS name serviced by the ingress controller and is used to configure multiple features: \n * For the LoadBalancerService endpoint publishing strategy, domain is used to configure DNS records. See endpointPublishingStrategy. \n * When using a generated default certificate, the certificate will be valid for domain and its subdomains. See defaultCertificate. \n * The value is published to individual Route statuses so that end-users know where to target external DNS records. \n domain must be unique among all IngressControllers, and cannot be updated. \n If empty, defaults to ingress.config.openshift.io/cluster .spec.domain." + type: string + endpointPublishingStrategy: + description: "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc. \n If unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform: \n AWS: LoadBalancerService (with External scope) Azure: LoadBalancerService (with External scope) GCP: LoadBalancerService (with External scope) IBMCloud: LoadBalancerService (with External scope) AlibabaCloud: LoadBalancerService (with External scope) Libvirt: HostNetwork \n Any other platform types (including None) default to HostNetwork. \n endpointPublishingStrategy cannot be updated." + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. + properties: + httpPort: + default: 80 + description: httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80. 
+ format: int32 + maximum: 65535 + minimum: 0 + type: integer + httpsPort: + default: 443 + description: httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + statsPort: + default: 1936 + description: statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. 
The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. + properties: + allowedSourceRanges: + description: "allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. Each range must be specified using CIDR notation (e.g. \"10.0.0.0/8\" or \"fd00::/8\"). If no range is specified, \"0.0.0.0/0\" for IPv4 and \"::/0\" for IPv6 are used by default, which allows all source addresses. \n To facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the \"router-\" service in the \"openshift-ingress\" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12." + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
+ pattern: (^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + nullable: true + type: array + dnsManagementPolicy: + default: Managed + description: 'dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. Valid values are: Managed and Unmanaged.' + enum: + - Managed + - Unmanaged + type: string + providerParameters: + description: "providerParameters holds desired load balancer information specific to the underlying infrastructure provider. \n If empty, defaults will be applied. See specific providerParameters fields for details about their defaults." + properties: + aws: + description: "aws provides configuration settings that are specific to AWS load balancers. \n If empty, defaults will be applied. 
See specific aws fields for details about their defaults." + properties: + classicLoadBalancer: + description: classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic. + properties: + connectionIdleTimeout: + description: connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see . A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change. + format: duration + type: string + type: object + networkLoadBalancer: + description: networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB. + type: object + type: + description: "type is the type of AWS load balancer to instantiate for an ingresscontroller. \n Valid values are: \n * \"Classic\": A Classic Load Balancer that makes routing decisions at either the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb \n * \"NLB\": A Network Load Balancer that makes routing decisions at the transport layer (TCP/SSL). See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - Classic + - NLB + type: string + required: + - type + type: object + gcp: + description: "gcp provides configuration settings that are specific to GCP load balancers. \n If empty, defaults will be applied. See specific gcp fields for details about their defaults." + properties: + clientAccess: + description: "clientAccess describes how client access is restricted for internal load balancers. 
\n Valid values are: * \"Global\": Specifying an internal load balancer with Global client access allows clients from any region within the VPC to communicate with the load balancer. \n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access \n * \"Local\": Specifying an internal load balancer with Local client access means only clients within the same region (and VPC) as the GCP load balancer can communicate with the load balancer. Note that this is the default behavior. \n https://cloud.google.com/load-balancing/docs/internal#client_access" + enum: + - Global + - Local + type: string + type: object + ibm: + description: "ibm provides configuration settings that are specific to IBM Cloud load balancers. \n If empty, defaults will be applied. See specific ibm fields for details about their defaults." + properties: + protocol: + description: "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\" \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n Valid values for protocol are TCP, PROXY and omitted. 
When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled." + enum: + - "" + - TCP + - PROXY + type: string + type: object + type: + description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", "OpenStack", and "VSphere". + enum: + - AWS + - Azure + - BareMetal + - GCP + - Nutanix + - OpenStack + - VSphere + - IBM + type: string + required: + - type + type: object + scope: + description: scope indicates the scope at which the load balancer is exposed. Possible values are "External" and "Internal". + enum: + - Internal + - External + type: string + required: + - dnsManagementPolicy + - scope + type: object + nodePort: + description: nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. + properties: + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". 
\n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + private: + description: private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. + properties: + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + type: + description: "type is the publishing strategy to use. Valid values are: \n * LoadBalancerService \n Publishes the ingress controller using a Kubernetes LoadBalancer Service. \n In this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment. \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer \n If domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. 
DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone. \n Wildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms. \n * HostNetwork \n Publishes the ingress controller on node ports where the ingress controller is deployed. \n In this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports. \n * Private \n Does not publish the ingress controller. \n In this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller. \n * NodePortService \n Publishes the ingress controller using a Kubernetes NodePort Service. \n In this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will preserved." + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + type: string + required: + - type + type: object + httpCompression: + description: httpCompression defines a policy for HTTP traffic compression. By default, there is no HTTP compression. + properties: + mimeTypes: + description: "mimeTypes is a list of MIME types that should have compression applied. This list can be empty, in which case the ingress controller does not apply compression. \n Note: Not all MIME types benefit from compression, but HAProxy will still use resources to try to compress if instructed to. Generally speaking, text (html, css, js, etc.) formats benefit from compression, but formats that are already compressed (image, audio, video, etc.) 
benefit little in exchange for the time and cpu spent on compressing again. See https://joehonton.medium.com/the-gzip-penalty-d31bd697f1a2" + items: + description: "CompressionMIMEType defines the format of a single MIME type. E.g. \"text/css; charset=utf-8\", \"text/html\", \"text/*\", \"image/svg+xml\", \"application/octet-stream\", \"X-custom/customsub\", etc. \n The format should follow the Content-Type definition in RFC 1341: Content-Type := type \"/\" subtype *[\";\" parameter] - The type in Content-Type can be one of: application, audio, image, message, multipart, text, video, or a custom type preceded by \"X-\" and followed by a token as defined below. - The token is a string of at least one character, and not containing white space, control characters, or any of the characters in the tspecials set. - The tspecials set contains the characters ()<>@,;:\\\"/[]?.= - The subtype in Content-Type is also a token. - The optional parameter/s following the subtype are defined as: token \"=\" (token / quoted-string) - The quoted-string, as defined in RFC 822, is surrounded by double quotes and can contain white space plus any character EXCEPT \\, \", and CR. It can also contain any single ASCII character as long as it is escaped by \\." + pattern: ^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$ + type: string + type: array + x-kubernetes-list-type: set + type: object + httpEmptyRequestsPolicy: + default: Respond + description: "httpEmptyRequestsPolicy describes how HTTP connections should be handled if the connection times out before a request is received. Allowed values for this field are \"Respond\" and \"Ignore\". 
If the field is set to \"Respond\", the ingress controller sends an HTTP 400 or 408 response, logs the connection (if access logging is enabled), and counts the connection in the appropriate metrics. If the field is set to \"Ignore\", the ingress controller closes the connection without sending a response, logging the connection, or incrementing metrics. The default value is \"Respond\". \n Typically, these connections come from load balancers' health probes or Web browsers' speculative connections (\"preconnect\") and can be safely ignored. However, these requests may also be caused by network errors, and so setting this field to \"Ignore\" may impede detection and diagnosis of problems. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts." + enum: + - Respond + - Ignore + type: string + httpErrorCodePages: + description: httpErrorCodePages specifies a configmap with custom error pages. The administrator must create this configmap in the openshift-config namespace. This configmap should have keys in the format "error-page-.http", where is an HTTP error code. For example, "error-page-503.http" defines an error page for HTTP 503 responses. Currently only error pages for 503 and 404 responses can be customized. Each value in the configmap should be the full response, including HTTP headers. Eg- https://raw.githubusercontent.com/openshift/router/fadab45747a9b30cc3f0a4b41ad2871f95827a93/images/router/haproxy/conf/error-page-503.http If this field is empty, the ingress controller uses the default error pages. + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + required: + - name + type: object + httpHeaders: + description: "httpHeaders defines policy for HTTP headers. \n If this field is empty, the default values are used." + properties: + actions: + description: 'actions specifies options for modifying headers and their values. 
Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController''s spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route''s spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.' + properties: + request: + description: 'request is a list of HTTP request headers to modify. Actions defined here will modify the request headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. 
IngressController actions for request headers will be executed before Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".' + items: + description: IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. + properties: + value: + description: value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. 
+ enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: host header may not be modified via header actions + rule: self.lowerAscii() != 'host' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. 
The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + response: + description: 'response is a list of HTTP response headers to modify. Actions defined here will modify the response headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for response headers will be executed after Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".' + items: + description: IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. + properties: + value: + description: value specifies a header value. Dynamic values can be added. 
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' 
+ maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: host header may not be modified via header actions + rule: self.lowerAscii() != 'host' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + type: object + forwardedHeaderPolicy: + description: "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. 
The value may be one of the following: \n * \"Append\", which specifies that the IngressController appends the headers, preserving existing headers. \n * \"Replace\", which specifies that the IngressController sets the headers, replacing any existing Forwarded or X-Forwarded-* headers. \n * \"IfNone\", which specifies that the IngressController sets the headers if they are not already set. \n * \"Never\", which specifies that the IngressController never sets the headers, preserving any existing headers. \n By default, the policy is \"Append\"." + enum: + - Append + - Replace + - IfNone + - Never + type: string + headerNameCaseAdjustments: + description: "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization. \n These adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1. \n For request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses. \n If this field is empty, no request headers are adjusted." + items: + description: IngressControllerHTTPHeaderNameCaseAdjustment is the name of an HTTP header (for example, "X-Forwarded-For") in the desired capitalization. The value must be a valid HTTP header name as defined in RFC 2616 section 4.2. + maxLength: 1024 + minLength: 0 + pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + nullable: true + type: array + uniqueId: + description: "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. 
Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests. \n If this field is empty, no such header is injected into requests." + properties: + format: + description: 'format specifies the format for the injected HTTP header''s value. This field has no effect unless name is specified. For the HAProxy-based ingress controller implementation, this format uses the same syntax as the HTTP log format. If the field is empty, the default value is "%{+X}o\\ %ci:%cp_%fi:%fp_%Ts_%rt:%pid"; see the corresponding HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3' + maxLength: 1024 + minLength: 0 + pattern: ^(%(%|(\{[-+]?[QXE](,[-+]?[QXE])*\})?([A-Za-z]+|\[[.0-9A-Z_a-z]+(\([^)]+\))?(,[.0-9A-Z_a-z]+(\([^)]+\))?)*\]))|[^%[:cntrl:]])*$ + type: string + name: + description: name specifies the name of the HTTP header (for example, "unique-id") that the ingress controller should inject into HTTP requests. The field's value must be a valid HTTP header name as defined in RFC 2616 section 4.2. If the field is empty, no header is injected. + maxLength: 1024 + minLength: 0 + pattern: ^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + type: object + type: object + logging: + description: logging defines parameters for what should be logged where. If this field is empty, operational logs are enabled but access logs are disabled. + properties: + access: + description: "access describes how the client requests should be logged. \n If this field is empty, access logging is disabled." + properties: + destination: + description: destination is where access logs go. + properties: + container: + description: container holds parameters for the Container logging destination. Present only if type is Container. + properties: + maxLength: + default: 1024 + description: "maxLength is the maximum length of the log message. 
\n Valid values are integers in the range 480 to 8192, inclusive. \n When omitted, the default value is 1024." + format: int32 + maximum: 8192 + minimum: 480 + type: integer + type: object + syslog: + description: syslog holds parameters for a syslog endpoint. Present only if type is Syslog. + oneOf: + - properties: + address: + format: ipv4 + - properties: + address: + format: ipv6 + properties: + address: + description: address is the IP address of the syslog endpoint that receives log messages. + type: string + facility: + description: "facility specifies the syslog facility of log messages. \n If this field is empty, the facility is \"local1\"." + enum: + - kern + - user + - mail + - daemon + - auth + - syslog + - lpr + - news + - uucp + - cron + - auth2 + - ftp + - ntp + - audit + - alert + - cron2 + - local0 + - local1 + - local2 + - local3 + - local4 + - local5 + - local6 + - local7 + type: string + maxLength: + default: 1024 + description: "maxLength is the maximum length of the log message. \n Valid values are integers in the range 480 to 4096, inclusive. \n When omitted, the default value is 1024." + format: int32 + maximum: 4096 + minimum: 480 + type: integer + port: + description: port is the UDP port number of the syslog endpoint that receives log messages. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + type: + description: "type is the type of destination for logs. It must be one of the following: \n * Container \n The ingress operator configures the sidecar container named \"logs\" on the ingress controller pod and configures the ingress controller to write logs to the sidecar. The logs are then available as container logs. The expectation is that the administrator configures a custom logging solution that reads logs from this sidecar. 
Note that using container logs means that logs may be dropped if the rate of logs exceeds the container runtime's or the custom logging solution's capacity. \n * Syslog \n Logs are sent to a syslog endpoint. The administrator must specify an endpoint that can receive syslog messages. The expectation is that the administrator has configured a custom syslog instance." + enum: + - Container + - Syslog + type: string + required: + - type + type: object + httpCaptureCookies: + description: httpCaptureCookies specifies HTTP cookies that should be captured in access logs. If this field is empty, no cookies are captured. + items: + description: IngressControllerCaptureHTTPCookie describes an HTTP cookie that should be captured. + properties: + matchType: + description: matchType specifies the type of match to be performed on the cookie name. Allowed values are "Exact" for an exact string match and "Prefix" for a string prefix match. If "Exact" is specified, a name must be specified in the name field. If "Prefix" is provided, a prefix must be specified in the namePrefix field. For example, specifying matchType "Prefix" and namePrefix "foo" will capture a cookie named "foo" or "foobar" but not one named "bar". The first matching cookie is captured. + enum: + - Exact + - Prefix + type: string + maxLength: + description: maxLength specifies a maximum length of the string that will be logged, which includes the cookie name, cookie value, and one-character delimiter. If the log entry exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + maximum: 1024 + minimum: 1 + type: integer + name: + description: name specifies a cookie name. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1. 
+ maxLength: 1024 + minLength: 0 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ + type: string + namePrefix: + description: namePrefix specifies a cookie name prefix. Its value must be a valid HTTP cookie name as defined in RFC 6265 section 4.1. + maxLength: 1024 + minLength: 0 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]*$ + type: string + required: + - matchType + - maxLength + type: object + maxItems: 1 + nullable: true + type: array + httpCaptureHeaders: + description: "httpCaptureHeaders defines HTTP headers that should be captured in access logs. If this field is empty, no headers are captured. \n Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be captured for TLS passthrough connections." + properties: + request: + description: "request specifies which HTTP request headers to capture. \n If this field is empty, no request headers are captured." + items: + description: IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured. + properties: + maxLength: + description: maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + minimum: 1 + type: integer + name: + description: name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + required: + - maxLength + - name + type: object + nullable: true + type: array + response: + description: "response specifies which HTTP response headers to capture. \n If this field is empty, no response headers are captured." + items: + description: IngressControllerCaptureHTTPHeader describes an HTTP header that should be captured. 
+ properties: + maxLength: + description: maxLength specifies a maximum length for the header value. If a header value exceeds this length, the value will be truncated in the log message. Note that the ingress controller may impose a separate bound on the total length of HTTP headers in a request. + minimum: 1 + type: integer + name: + description: name specifies a header name. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + required: + - maxLength + - name + type: object + nullable: true + type: array + type: object + httpLogFormat: + description: "httpLogFormat specifies the format of the log message for an HTTP request. \n If this field is empty, log messages use the implementation's default HTTP log format. For HAProxy's default HTTP log format, see the HAProxy documentation: http://cbonte.github.io/haproxy-dconv/2.0/configuration.html#8.2.3 \n Note that this format only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). It does not affect the log format for TLS passthrough connections." + type: string + logEmptyRequests: + default: Log + description: logEmptyRequests specifies how connections on which no request is received should be logged. Typically, these empty requests come from load balancers' health probes or Web browsers' speculative connections ("preconnect"), in which case logging these requests may be undesirable. However, these requests may also be caused by network errors, in which case logging empty requests may be useful for diagnosing the errors. In addition, these requests may be caused by port scans, in which case logging empty requests may aid in detecting intrusion attempts. Allowed values for this field are "Log" and "Ignore". The default value is "Log". 
+ enum: + - Log + - Ignore + type: string + required: + - destination + type: object + type: object + namespaceSelector: + description: "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards. \n If unset, the default is no filtering." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + nodePlacement: + description: "nodePlacement enables explicit control over the scheduling of the ingress controller. \n If unset, defaults are used. See NodePlacement for more details." + properties: + nodeSelector: + description: "nodeSelector is the node selector applied to ingress controller deployments. \n If set, the specified selector is used and replaces the default. 
\n If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status. \n When defaultPlacement is Workers, the default is: \n kubernetes.io/os: linux node-role.kubernetes.io/worker: '' \n When defaultPlacement is ControlPlane, the default is: \n kubernetes.io/os: linux node-role.kubernetes.io/master: '' \n These defaults are subject to change. \n Note that using nodeSelector.matchExpressions is not supported. Only nodeSelector.matchLabels may be used. This is a limitation of the Kubernetes API: the pod spec does not allow complex expressions for node selectors." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + tolerations: + description: "tolerations is a list of tolerations applied to ingress controller deployments. \n The default is an empty list. \n See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/" + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + replicas: + description: "replicas is the desired number of ingress controller replicas. If unset, the default depends on the value of the defaultPlacement field in the cluster config.openshift.io/v1/ingresses status. 
\n The value of replicas is set based on the value of a chosen field in the Infrastructure CR. If defaultPlacement is set to ControlPlane, the chosen field will be controlPlaneTopology. If it is set to Workers the chosen field will be infrastructureTopology. Replicas will then be set to 1 or 2 based whether the chosen field's value is SingleReplica or HighlyAvailable, respectively. \n These defaults are subject to change." + format: int32 + type: integer + routeAdmission: + description: "routeAdmission defines a policy for handling new route claims (for example, to allow or deny claims across namespaces). \n If empty, defaults will be applied. See specific routeAdmission fields for details about their defaults." + properties: + namespaceOwnership: + description: "namespaceOwnership describes how host name claims across namespaces should be handled. \n Value must be one of: \n - Strict: Do not allow routes in different namespaces to claim the same host. \n - InterNamespaceAllowed: Allow routes to claim different paths of the same host name across namespaces. \n If empty, the default is Strict." + enum: + - InterNamespaceAllowed + - Strict + type: string + wildcardPolicy: + description: "wildcardPolicy describes how routes with wildcard policies should be handled for the ingress controller. WildcardPolicy controls use of routes [1] exposed by the ingress controller based on the route's wildcard policy. \n [1] https://github.com/openshift/api/blob/master/route/v1/types.go \n Note: Updating WildcardPolicy from WildcardsAllowed to WildcardsDisallowed will cause admitted routes with a wildcard policy of Subdomain to stop working. These routes must be updated to a wildcard policy of None to be readmitted by the ingress controller. \n WildcardPolicy supports WildcardsAllowed and WildcardsDisallowed values. \n If empty, defaults to \"WildcardsDisallowed\"." 
+ enum: + - WildcardsAllowed + - WildcardsDisallowed + type: string + type: object + routeSelector: + description: "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards. \n If unset, the default is no filtering." + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. \n If unset, the default is based on the apiservers.config.openshift.io/cluster resource. \n Note that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. 
For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout." + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + nullable: true + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. 
For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + nullable: true + type: object + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
+ nullable: true + type: object + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + nullable: true + type: object + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." 
+ enum: + - Old + - Intermediate + - Modern + - Custom + type: string + type: object + tuningOptions: + anyOf: + - properties: + maxConnections: + enum: + - -1 + - 0 + - properties: + maxConnections: + format: int32 + maximum: 2000000 + minimum: 2000 + description: "tuningOptions defines parameters for adjusting the performance of ingress controller pods. All fields are optional and will use their respective defaults if not set. See specific tuningOptions fields for more details. \n Setting fields within tuningOptions is generally not recommended. The default values are suitable for most configurations." + properties: + clientFinTimeout: + description: "clientFinTimeout defines how long a connection will be held open while waiting for the client response to the server/backend closing the connection. \n If unset, the default timeout is 1s" + format: duration + type: string + clientTimeout: + description: "clientTimeout defines how long a connection will be held open while waiting for a client response. \n If unset, the default timeout is 30s" + format: duration + type: string + headerBufferBytes: + description: "headerBufferBytes describes how much memory should be reserved (in bytes) for IngressController connection sessions. Note that this value must be at least 16384 if HTTP/2 is enabled for the IngressController (https://tools.ietf.org/html/rfc7540). If this field is empty, the IngressController will use a default value of 32768 bytes. \n Setting this field is generally not recommended as headerBufferBytes values that are too small may break the IngressController and headerBufferBytes values that are too large could cause the IngressController to use significantly more memory than necessary." 
+ format: int32 + minimum: 16384 + type: integer + headerBufferMaxRewriteBytes: + description: "headerBufferMaxRewriteBytes describes how much memory should be reserved (in bytes) from headerBufferBytes for HTTP header rewriting and appending for IngressController connection sessions. Note that incoming HTTP requests will be limited to (headerBufferBytes - headerBufferMaxRewriteBytes) bytes, meaning headerBufferBytes must be greater than headerBufferMaxRewriteBytes. If this field is empty, the IngressController will use a default value of 8192 bytes. \n Setting this field is generally not recommended as headerBufferMaxRewriteBytes values that are too small may break the IngressController and headerBufferMaxRewriteBytes values that are too large could cause the IngressController to use significantly more memory than necessary." + format: int32 + minimum: 4096 + type: integer + healthCheckInterval: + description: "healthCheckInterval defines how long the router waits between two consecutive health checks on its configured backends. This value is applied globally as a default for all routes, but may be overridden per-route by the route annotation \"router.openshift.io/haproxy.health.check.interval\". \n Expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\". \n Setting this to less than 5s can cause excess traffic due to too frequent TCP health checks and accompanying SYN packet storms. Alternatively, setting this too high can result in increased latency, due to backend servers that are no longer available, but haven't yet been detected as such. \n An empty or zero healthCheckInterval means no opinion and IngressController chooses a default, which is subject to change over time. Currently the default healthCheckInterval value is 5s. 
\n Currently the minimum allowed value is 1s and the maximum allowed value is 2147483647ms (24.85 days). Both are subject to change over time." + pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + type: string + maxConnections: + description: "maxConnections defines the maximum number of simultaneous connections that can be established per HAProxy process. Increasing this value allows each ingress controller pod to handle more connections but at the cost of additional system resources being consumed. \n Permitted values are: empty, 0, -1, and the range 2000-2000000. \n If this field is empty or 0, the IngressController will use the default value of 50000, but the default is subject to change in future releases. \n If the value is -1 then HAProxy will dynamically compute a maximum value based on the available ulimits in the running container. Selecting -1 (i.e., auto) will result in a large value being computed (~520000 on OpenShift >=4.10 clusters) and therefore each HAProxy process will incur significant memory usage compared to the current default of 50000. \n Setting a value that is greater than the current operating system limit will prevent the HAProxy process from starting. \n If you choose a discrete value (e.g., 750000) and the router pod is migrated to a new node, there's no guarantee that that new node has identical ulimits configured. In such a scenario the pod would fail to start. If you have nodes with different ulimits configured (e.g., different tuned profiles) and you choose a discrete value then the guidance is to use -1 and let the value be computed dynamically at runtime. \n You can monitor memory usage for router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}'. 
\n You can monitor memory usage of individual HAProxy processes in router containers with the following metric: 'container_memory_working_set_bytes{container=\"router\",namespace=\"openshift-ingress\"}/container_processes{container=\"router\",namespace=\"openshift-ingress\"}'." + format: int32 + type: integer + reloadInterval: + description: "reloadInterval defines the minimum interval at which the router is allowed to reload to accept new changes. Increasing this value can prevent the accumulation of HAProxy processes, depending on the scenario. Increasing this interval can also lessen load imbalance on a backend's servers when using the roundrobin balancing algorithm. Alternatively, decreasing this value may decrease latency since updates to HAProxy's configuration can take effect more quickly. \n The value must be a time duration value; see . Currently, the minimum value allowed is 1s, and the maximum allowed value is 120s. Minimum and maximum allowed values may change in future versions of OpenShift. Note that if a duration outside of these bounds is provided, the value of reloadInterval will be capped/floored and not rejected (e.g. a duration of over 120s will be capped to 120s; the IngressController will not reject and replace this disallowed value with the default). \n A zero value for reloadInterval tells the IngressController to choose the default, which is currently 5s and subject to change without notice. \n This field expects an unsigned duration string of decimal numbers, each with optional fraction and a unit suffix, e.g. \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\" U+00B5 or \"μs\" U+03BC), \"ms\", \"s\", \"m\", \"h\". \n Note: Setting a value significantly larger than the default of 5s can cause latency in observing updates to routes and their endpoints. HAProxy's configuration will be reloaded less frequently, and newly created routes will not be served until the subsequent reload." 
+ pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|μs|ms|s|m|h))+)$ + type: string + serverFinTimeout: + description: "serverFinTimeout defines how long a connection will be held open while waiting for the server/backend response to the client closing the connection. \n If unset, the default timeout is 1s" + format: duration + type: string + serverTimeout: + description: "serverTimeout defines how long a connection will be held open while waiting for a server/backend response. \n If unset, the default timeout is 30s" + format: duration + type: string + threadCount: + description: "threadCount defines the number of threads created per HAProxy process. Creating more threads allows each ingress controller pod to handle more connections, at the cost of more system resources being used. HAProxy currently supports up to 64 threads. If this field is empty, the IngressController will use the default value. The current default is 4 threads, but this may change in future releases. \n Setting this field is generally not recommended. Increasing the number of HAProxy threads allows ingress controller pods to utilize more CPU time under load, potentially starving other pods if set too high. Reducing the number of threads may cause the ingress controller to perform poorly." + format: int32 + maximum: 64 + minimum: 1 + type: integer + tlsInspectDelay: + description: "tlsInspectDelay defines how long the router can hold data to find a matching route. \n Setting this too short can cause the router to fall back to the default certificate for edge-terminated or reencrypt routes even when a better matching certificate could be used. \n If unset, the default inspect delay is 5s" + format: duration + type: string + tunnelTimeout: + description: "tunnelTimeout defines how long a tunnel connection (including websockets) will be held open while the tunnel is idle. 
\n If unset, the default timeout is 1h" + format: duration + type: string + type: object + unsupportedConfigOverrides: + description: unsupportedConfigOverrides allows specifying unsupported configuration options. Its use is unsupported. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the IngressController. + properties: + availableReplicas: + description: availableReplicas is number of observed available replicas according to the ingress controller deployment. + format: int32 + type: integer + conditions: + description: "conditions is a list of conditions and their status. \n Available means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas) \n There are additional conditions which indicate the status of other ingress controller features and capabilities. \n * LoadBalancerManaged - True if the following conditions are met: * The endpoint publishing strategy requires a service load balancer. - False if any of those conditions are unsatisfied. \n * LoadBalancerReady - True if the following conditions are met: * A load balancer is managed. * The load balancer is ready. - False if any of those conditions are unsatisfied. \n * DNSManaged - True if the following conditions are met: * The endpoint publishing strategy and platform support DNS. * The ingress controller domain is set. * dns.config.openshift.io/cluster configures DNS zones. - False if any of those conditions are unsatisfied. \n * DNSReady - True if the following conditions are met: * DNS is managed. * DNS records have been successfully created. - False if any of those conditions are unsatisfied." + items: + description: OperatorCondition is just the standard condition fields. 
+ properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + type: array + domain: + description: domain is the actual domain in use. + type: string + endpointPublishingStrategy: + description: endpointPublishingStrategy is the actual strategy in use. + properties: + hostNetwork: + description: hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork. + properties: + httpPort: + default: 80 + description: httpPort is the port on the host which should be used to listen for HTTP requests. This field should be set when port 80 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 80. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + httpsPort: + default: 443 + description: httpsPort is the port on the host which should be used to listen for HTTPS requests. This field should be set when port 443 is already in use. The value should not coincide with the NodePort range of the cluster. When the value is 0 or is not specified it defaults to 443. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. 
Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + statsPort: + default: 1936 + description: statsPort is the port on the host where the stats from the router are published. The value should not coincide with the NodePort range of the cluster. If an external load balancer is configured to forward connections to this IngressController, the load balancer should use this port for health checks. The load balancer can send HTTP probes on this port on a given node, with the path /healthz/ready to determine if the ingress controller is ready to receive traffic on the node. For proper operation the load balancer must not forward traffic to a node until the health check reports ready. The load balancer should also stop forwarding requests within a maximum of 45 seconds after /healthz/ready starts reporting not-ready. Probing every 5 to 10 seconds, with a 5-second timeout and with a threshold of two successful or failed requests to become healthy or unhealthy respectively, are well-tested values. When the value is 0 or is not specified it defaults to 1936. + format: int32 + maximum: 65535 + minimum: 0 + type: integer + type: object + loadBalancer: + description: loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService. + properties: + allowedSourceRanges: + description: "allowedSourceRanges specifies an allowlist of IP address ranges to which access to the load balancer should be restricted. 
Each range must be specified using CIDR notation (e.g. \"10.0.0.0/8\" or \"fd00::/8\"). If no range is specified, \"0.0.0.0/0\" for IPv4 and \"::/0\" for IPv6 are used by default, which allows all source addresses. \n To facilitate migration from earlier versions of OpenShift that did not have the allowedSourceRanges field, you may set the service.beta.kubernetes.io/load-balancer-source-ranges annotation on the \"router-\" service in the \"openshift-ingress\" namespace, and this annotation will take effect if allowedSourceRanges is empty on OpenShift 4.12." + items: + description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). + pattern: (^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + type: string + nullable: true + type: array + dnsManagementPolicy: + default: Managed + description: 
'dnsManagementPolicy indicates if the lifecycle of the wildcard DNS record associated with the load balancer service will be managed by the ingress operator. It defaults to Managed. Valid values are: Managed and Unmanaged.' + enum: + - Managed + - Unmanaged + type: string + providerParameters: + description: "providerParameters holds desired load balancer information specific to the underlying infrastructure provider. \n If empty, defaults will be applied. See specific providerParameters fields for details about their defaults." + properties: + aws: + description: "aws provides configuration settings that are specific to AWS load balancers. \n If empty, defaults will be applied. See specific aws fields for details about their defaults." + properties: + classicLoadBalancer: + description: classicLoadBalancerParameters holds configuration parameters for an AWS classic load balancer. Present only if type is Classic. + properties: + connectionIdleTimeout: + description: connectionIdleTimeout specifies the maximum time period that a connection may be idle before the load balancer closes the connection. The value must be parseable as a time duration value; see . A nil or zero value means no opinion, in which case a default value is used. The default value for this field is 60s. This default is subject to change. + format: duration + type: string + type: object + networkLoadBalancer: + description: networkLoadBalancerParameters holds configuration parameters for an AWS network load balancer. Present only if type is NLB. + type: object + type: + description: "type is the type of AWS load balancer to instantiate for an ingresscontroller. \n Valid values are: \n * \"Classic\": A Classic Load Balancer that makes routing decisions at either the transport layer (TCP/SSL) or the application layer (HTTP/HTTPS). 
See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb \n * \"NLB\": A Network Load Balancer that makes routing decisions at the transport layer (TCP/SSL). See the following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - Classic + - NLB + type: string + required: + - type + type: object + gcp: + description: "gcp provides configuration settings that are specific to GCP load balancers. \n If empty, defaults will be applied. See specific gcp fields for details about their defaults." + properties: + clientAccess: + description: "clientAccess describes how client access is restricted for internal load balancers. \n Valid values are: * \"Global\": Specifying an internal load balancer with Global client access allows clients from any region within the VPC to communicate with the load balancer. \n https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#global_access \n * \"Local\": Specifying an internal load balancer with Local client access means only clients within the same region (and VPC) as the GCP load balancer can communicate with the load balancer. Note that this is the default behavior. \n https://cloud.google.com/load-balancing/docs/internal#client_access" + enum: + - Global + - Local + type: string + type: object + ibm: + description: "ibm provides configuration settings that are specific to IBM Cloud load balancers. \n If empty, defaults will be applied. See specific ibm fields for details about their defaults." + properties: + protocol: + description: "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. 
See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\" \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n Valid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled." + enum: + - "" + - TCP + - PROXY + type: string + type: object + type: + description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", "OpenStack", and "VSphere". + enum: + - AWS + - Azure + - BareMetal + - GCP + - Nutanix + - OpenStack + - VSphere + - IBM + type: string + required: + - type + type: object + scope: + description: scope indicates the scope at which the load balancer is exposed. Possible values are "External" and "Internal". + enum: + - Internal + - External + type: string + required: + - dnsManagementPolicy + - scope + type: object + nodePort: + description: nodePort holds parameters for the NodePortService endpoint publishing strategy. Present only if type is NodePortService. 
+ properties: + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + private: + description: private holds parameters for the Private endpoint publishing strategy. Present only if type is Private. + properties: + protocol: + description: "protocol specifies whether the IngressController expects incoming connections to use plain TCP or whether the IngressController expects PROXY protocol. \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. 
See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n The following values are valid for this field: \n * The empty string. * \"TCP\". * \"PROXY\". \n The empty string specifies the default, which is TCP without PROXY protocol. Note that the default is subject to change." + enum: + - "" + - TCP + - PROXY + type: string + type: object + type: + description: "type is the publishing strategy to use. Valid values are: \n * LoadBalancerService \n Publishes the ingress controller using a Kubernetes LoadBalancer Service. \n In this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment. \n See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer \n If domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone. \n Wildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms. \n * HostNetwork \n Publishes the ingress controller on node ports where the ingress controller is deployed. \n In this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports. \n * Private \n Does not publish the ingress controller. \n In this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller. \n * NodePortService \n Publishes the ingress controller using a Kubernetes NodePort Service. \n In this configuration, the ingress controller deployment uses container networking. A NodePort Service is created to publish the deployment. 
The specific node ports are dynamically allocated by OpenShift; however, to support static port allocations, user changes to the node port field of the managed NodePort Service will preserved." + enum: + - LoadBalancerService + - HostNetwork + - Private + - NodePortService + type: string + required: + - type + type: object + namespaceSelector: + description: namespaceSelector is the actual namespaceSelector in use. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + observedGeneration: + description: observedGeneration is the most recent generation observed. + format: int64 + type: integer + routeSelector: + description: routeSelector is the actual routeSelector in use. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + selector: + description: selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. The number of matching pods should equal the value of availableReplicas. + type: string + tlsProfile: + description: tlsProfile is the TLS connection configuration that is in effect. + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. 
For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + items: + type: string + type: array + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.availableReplicas + status: {} diff --git a/test/integration/framework/assets/0alertmanagerConfigCustomResourceDefinition.yaml b/test/integration/framework/assets/0alertmanagerConfigCustomResourceDefinition.yaml new file mode 100644 index 0000000000..711dc0c4d0 --- /dev/null +++ b/test/integration/framework/assets/0alertmanagerConfigCustomResourceDefinition.yaml @@ -0,0 +1,4296 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.70.0 + name: alertmanagerconfigs.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: AlertmanagerConfig + listKind: AlertmanagerConfigList + plural: alertmanagerconfigs + shortNames: + - amcfg + singular: alertmanagerconfig + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: AlertmanagerConfig configures the Prometheus Alertmanager, specifying how alerts should be grouped, inhibited and notified to external systems. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AlertmanagerConfigSpec is a specification of the desired behavior of the Alertmanager configuration. By definition, the Alertmanager configuration only applies to alerts for which the `namespace` label is equal to the namespace of the AlertmanagerConfig resource. + properties: + inhibitRules: + description: List of inhibition rules. The rules will only apply to alerts matching the resource's namespace. + items: + description: InhibitRule defines an inhibition rule that allows to mute alerts when other alerts are already firing. See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule + properties: + equal: + description: Labels that must have an equal value in the source and target alert for the inhibition to take effect. + items: + type: string + type: array + sourceMatch: + description: Matchers for which one or more alerts have to exist for the inhibition to take effect. The operator enforces that the alert matches the resource's namespace. + items: + description: Matcher defines how to match on alert's labels. + properties: + matchType: + description: Match operation available with AlertManager >= v0.22.0 and takes precedence over Regex (deprecated) if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. 
+ minLength: 1 + type: string + regex: + description: 'Whether to match on equality (false) or regular-expression (true). Deprecated: for AlertManager >= v0.22.0, `matchType` should be used instead.' + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + targetMatch: + description: Matchers that have to be fulfilled in the alerts to be muted. The operator enforces that the alert matches the resource's namespace. + items: + description: Matcher defines how to match on alert's labels. + properties: + matchType: + description: Match operation available with AlertManager >= v0.22.0 and takes precedence over Regex (deprecated) if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: 'Whether to match on equality (false) or regular-expression (true). Deprecated: for AlertManager >= v0.22.0, `matchType` should be used instead.' + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + type: object + type: array + muteTimeIntervals: + description: List of MuteTimeInterval specifying when the routes should be muted. 
+ items: + description: MuteTimeInterval specifies the periods in time when notifications will be muted + properties: + name: + description: Name of the time interval + type: string + timeIntervals: + description: TimeIntervals is a list of TimeInterval + items: + description: TimeInterval describes intervals of time + properties: + daysOfMonth: + description: DaysOfMonth is a list of DayOfMonthRange + items: + description: DayOfMonthRange is an inclusive range of days of the month beginning at 1 + properties: + end: + description: End of the inclusive range + maximum: 31 + minimum: -31 + type: integer + start: + description: Start of the inclusive range + maximum: 31 + minimum: -31 + type: integer + type: object + type: array + months: + description: Months is a list of MonthRange + items: + description: MonthRange is an inclusive range of months of the year beginning in January Months can be specified by name (e.g 'January') by numerical month (e.g '1') or as an inclusive range (e.g 'January:March', '1:3', '1:March') + pattern: ^((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12])(?:((:((?i)january|february|march|april|may|june|july|august|september|october|november|december|[1-12]))$)|$) + type: string + type: array + times: + description: Times is a list of TimeRange + items: + description: TimeRange defines a start and end time in 24hr format + properties: + endTime: + description: EndTime is the end time in 24hr format. + pattern: ^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$) + type: string + startTime: + description: StartTime is the start time in 24hr format. 
+ pattern: ^((([01][0-9])|(2[0-3])):[0-5][0-9])$|(^24:00$) + type: string + type: object + type: array + weekdays: + description: Weekdays is a list of WeekdayRange + items: + description: WeekdayRange is an inclusive range of days of the week beginning on Sunday Days can be specified by name (e.g 'Sunday') or as an inclusive range (e.g 'Monday:Friday') + pattern: ^((?i)sun|mon|tues|wednes|thurs|fri|satur)day(?:((:(sun|mon|tues|wednes|thurs|fri|satur)day)$)|$) + type: string + type: array + years: + description: Years is a list of YearRange + items: + description: YearRange is an inclusive range of years + pattern: ^2\d{3}(?::2\d{3}|$) + type: string + type: array + type: object + type: array + type: object + type: array + receivers: + description: List of receivers. + items: + description: Receiver defines one or more notification integrations. + properties: + discordConfigs: + description: List of Discord configurations. + items: + description: DiscordConfig configures notifications via Discord. See https://prometheus.io/docs/alerting/latest/configuration/#discord_config + properties: + apiURL: + description: The secret's key that contains the Discord webhook URL. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. 
This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. 
+ properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. 
Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: The template of the message's body. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + title: + description: The template of the message's title. + type: string + required: + - apiURL + type: object + type: array + emailConfigs: + description: List of Email configurations. + items: + description: EmailConfig configures notifications via Email. + properties: + authIdentity: + description: The identity to use for authentication. + type: string + authPassword: + description: The secret's key that contains the password to use for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + authSecret: + description: The secret's key that contains the CRAM-MD5 secret. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + authUsername: + description: The username to use for authentication. + type: string + from: + description: The sender address. + type: string + headers: + description: Further headers email header key/value pairs. Overrides any headers previously set by the notification implementation. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + hello: + description: The hostname to identify to the SMTP server. + type: string + html: + description: The HTML body of the email notification. + type: string + requireTLS: + description: The SMTP TLS requirement. 
Note that Go does not support unencrypted connections to remote SMTP endpoints. + type: boolean + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + smarthost: + description: The SMTP host and port through which emails are sent. E.g. example.com:25 + type: string + text: + description: The text body of the email notification. + type: string + tlsConfig: + description: TLS configuration + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + to: + description: The email address to send notifications to. + type: string + type: object + type: array + msteamsConfigs: + description: List of MSTeams configurations. It requires Alertmanager >= 0.26.0. + items: + description: MSTeamsConfig configures notifications via Microsoft Teams. It requires Alertmanager >= 0.26.0. 
+ properties: + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. 
+ properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. 
Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + sendResolved: + description: Whether to notify about resolved alerts. + type: boolean + text: + description: Message body template. + type: string + title: + description: Message title template. + type: string + webhookUrl: + description: MSTeams webhook URL. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - webhookUrl + type: object + type: array + name: + description: Name of the receiver. Must be unique across all items from the list. + minLength: 1 + type: string + opsgenieConfigs: + description: List of OpsGenie configurations. + items: + description: OpsGenieConfig configures notifications via OpsGenie. See https://prometheus.io/docs/alerting/latest/configuration/#opsgenie_config + properties: + actions: + description: Comma separated list of actions that will be available for the alert. + type: string + apiKey: + description: The secret's key that contains the OpsGenie API key. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + apiURL: + description: The URL to send OpsGenie API requests to. + type: string + description: + description: Description of the incident. + type: string + details: + description: A set of arbitrary key/value pairs that provide further detail about the incident. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. 
+ type: string + required: + - key + - value + type: object + type: array + entity: + description: Optional field that can be used to specify which domain alert is related to. + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. 
+ properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' 
+ minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: Alert text limited to 130 characters. + type: string + note: + description: Additional alert note. + type: string + priority: + description: Priority level of alert. Possible values are P1, P2, P3, P4, and P5. + type: string + responders: + description: List of responders responsible for notifications. + items: + description: OpsGenieConfigResponder defines a responder to an incident. 
One of `id`, `name` or `username` has to be defined. + properties: + id: + description: ID of the responder. + type: string + name: + description: Name of the responder. + type: string + type: + description: Type of responder. + enum: + - team + - teams + - user + - escalation + - schedule + minLength: 1 + type: string + username: + description: Username of the responder. + type: string + required: + - type + type: object + type: array + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + source: + description: Backlink to the sender of the notification. + type: string + tags: + description: Comma separated list of tags attached to the notifications. + type: string + updateAlerts: + description: Whether to update message and description of the alert in OpsGenie if it already exists By default, the alert is never updated in OpsGenie, the new message only appears in activity log. + type: boolean + type: object + type: array + pagerdutyConfigs: + description: List of PagerDuty configurations. + items: + description: PagerDutyConfig configures notifications via PagerDuty. See https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config + properties: + class: + description: The class/type of the event. + type: string + client: + description: Client identification. + type: string + clientURL: + description: Backlink to the sender of notification. + type: string + component: + description: The part or component of the affected system that is broken. + type: string + description: + description: Description of the incident. + type: string + details: + description: Arbitrary key/value pairs that provide further detail about the incident. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. 
+ type: string + required: + - key + - value + type: object + type: array + group: + description: A cluster or grouping of sources. + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. 
+ properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' 
+ minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + pagerDutyImageConfigs: + description: A list of image details to attach that provide further detail about an incident. + items: + description: PagerDutyImageConfig attaches images to an incident + properties: + alt: + description: Alt is the optional alternative text for the image. + type: string + href: + description: Optional URL; makes the image a clickable link. 
+ type: string + src: + description: Src of the image being attached to the incident + type: string + type: object + type: array + pagerDutyLinkConfigs: + description: A list of link details to attach that provide further detail about an incident. + items: + description: PagerDutyLinkConfig attaches text links to an incident + properties: + alt: + description: Text that describes the purpose of the link, and can be used as the link's text. + type: string + href: + description: Href is the URL of the link to be attached + type: string + type: object + type: array + routingKey: + description: The secret's key that contains the PagerDuty integration key (when using Events API v2). Either this field or `serviceKey` needs to be defined. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + serviceKey: + description: The secret's key that contains the PagerDuty service key (when using integration type "Prometheus"). Either this field or `routingKey` needs to be defined. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + severity: + description: Severity of the incident. + type: string + url: + description: The URL to send requests to. + type: string + type: object + type: array + pushoverConfigs: + description: List of Pushover configurations. + items: + description: PushoverConfig configures notifications via Pushover. See https://prometheus.io/docs/alerting/latest/configuration/#pushover_config + properties: + device: + description: The name of a device to send the notification to + type: string + expire: + description: How long your notification will continue to be retried for, unless the user acknowledges the notification. + pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$ + type: string + html: + description: Whether notification message is HTML or plain text. + type: boolean + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: Notification message. + type: string + priority: + description: Priority, see https://pushover.net/api#priority + type: string + retry: + description: How often the Pushover servers will send the same notification to the user. Must be at least 30 seconds. + pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$ + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + sound: + description: The name of one of the sounds supported by device clients to override the user's default sound choice + type: string + title: + description: Notification title. + type: string + token: + description: The secret's key that contains the registered application's API token, see https://pushover.net/apps. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. Either `token` or `tokenFile` is required. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + tokenFile: + description: The token file that contains the registered application's API token, see https://pushover.net/apps. Either `token` or `tokenFile` is required. It requires Alertmanager >= v0.26.0. + type: string + url: + description: A supplementary URL shown alongside the message. + type: string + urlTitle: + description: A title for supplementary URL, otherwise just the URL is shown + type: string + userKey: + description: The secret's key that contains the recipient user's user key. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. Either `userKey` or `userKeyFile` is required. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + userKeyFile: + description: The user key file that contains the recipient user's user key. Either `userKey` or `userKeyFile` is required. It requires Alertmanager >= v0.26.0. + type: string + type: object + type: array + slackConfigs: + description: List of Slack configurations. + items: + description: SlackConfig configures notifications via Slack. 
See https://prometheus.io/docs/alerting/latest/configuration/#slack_config + properties: + actions: + description: A list of Slack actions that are sent with each notification. + items: + description: SlackAction configures a single Slack action that is sent with each notification. See https://api.slack.com/docs/message-attachments#action_fields and https://api.slack.com/docs/message-buttons for more information. + properties: + confirm: + description: SlackConfirmationField protect users from destructive actions or particularly distinguished decisions by asking them to confirm their button click one more time. See https://api.slack.com/docs/interactive-message-field-guide#confirmation_fields for more information. + properties: + dismissText: + type: string + okText: + type: string + text: + minLength: 1 + type: string + title: + type: string + required: + - text + type: object + name: + type: string + style: + type: string + text: + minLength: 1 + type: string + type: + minLength: 1 + type: string + url: + type: string + value: + type: string + required: + - text + - type + type: object + type: array + apiURL: + description: The secret's key that contains the Slack webhook URL. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + callbackId: + type: string + channel: + description: The channel or user to send notifications to. 
+ type: string + color: + type: string + fallback: + type: string + fields: + description: A list of Slack fields that are sent with each notification. + items: + description: SlackField configures a single Slack field that is sent with each notification. Each field must contain a title, value, and optionally, a boolean value to indicate if the field is short enough to be displayed next to other fields designated as short. See https://api.slack.com/docs/message-attachments#fields for more information. + properties: + short: + type: boolean + title: + minLength: 1 + type: string + value: + minLength: 1 + type: string + required: + - title + - value + type: object + type: array + footer: + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. 
+ properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + iconEmoji: + type: string + iconURL: + type: string + imageURL: + type: string + linkNames: + type: boolean + mrkdwnIn: + items: + type: string + type: array + pretext: + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + shortFields: + type: boolean + text: + type: string + thumbURL: + type: string + title: + type: string + titleLink: + type: string + username: + type: string + type: object + type: array + snsConfigs: + description: List of SNS configurations + items: + description: SNSConfig configures notifications via AWS SNS. See https://prometheus.io/docs/alerting/latest/configuration/#sns_configs + properties: + apiURL: + description: The SNS API URL i.e. https://sns.us-east-2.amazonaws.com. If not specified, the SNS API URL from the SNS SDK will be used. + type: string + attributes: + additionalProperties: + type: string + description: SNS message attributes. + type: object + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: The message content of the SNS notification. + type: string + phoneNumber: + description: Phone number if message is delivered via SMS in E.164 format. If you don't specify this value, you must specify a value for the TopicARN or TargetARN. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + sigv4: + description: Configures AWS's Signature Verification 4 signing process to sign requests. + properties: + accessKey: + description: AccessKey is the AWS API key. If not specified, the environment variable `AWS_ACCESS_KEY_ID` is used. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + profile: + description: Profile is the named AWS profile used to authenticate. + type: string + region: + description: Region is the AWS region. If blank, the region from the default credentials chain used. + type: string + roleArn: + description: RoleArn is the named AWS profile used to authenticate. + type: string + secretKey: + description: SecretKey is the AWS API secret. If not specified, the environment variable `AWS_SECRET_ACCESS_KEY` is used. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + subject: + description: Subject line when the message is delivered to email endpoints. + type: string + targetARN: + description: The mobile platform endpoint ARN if message is delivered via mobile notifications. If you don't specify this value, you must specify a value for the topic_arn or PhoneNumber. + type: string + topicARN: + description: SNS topic ARN, i.e. arn:aws:sns:us-east-2:698519295917:My-Topic If you don't specify this value, you must specify a value for the PhoneNumber or TargetARN. + type: string + type: object + type: array + telegramConfigs: + description: List of Telegram configurations. + items: + description: TelegramConfig configures notifications via Telegram. See https://prometheus.io/docs/alerting/latest/configuration/#telegram_config + properties: + apiURL: + description: The Telegram API URL i.e. 
https://api.telegram.org. If not specified, default API URL will be used. + type: string + botToken: + description: "Telegram bot token. It is mutually exclusive with `botTokenFile`. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. \n Either `botToken` or `botTokenFile` is required." + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + botTokenFile: + description: "File to read the Telegram bot token from. It is mutually exclusive with `botToken`. Either `botToken` or `botTokenFile` is required. \n It requires Alertmanager >= v0.26.0." + type: string + chatID: + description: The Telegram chat ID. + format: int64 + type: integer + disableNotifications: + description: Disable telegram notifications + type: boolean + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: Message template + type: string + parseMode: + description: Parse mode for telegram message + enum: + - MarkdownV2 + - Markdown + - HTML + type: string + sendResolved: + description: Whether to notify about resolved alerts. + type: boolean + type: object + type: array + victoropsConfigs: + description: List of VictorOps configurations. + items: + description: VictorOpsConfig configures notifications via VictorOps. See https://prometheus.io/docs/alerting/latest/configuration/#victorops_config + properties: + apiKey: + description: The secret's key that contains the API key to use when talking to the VictorOps API. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + apiUrl: + description: The VictorOps API URL. + type: string + customFields: + description: Additional custom fields for notification. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + entityDisplayName: + description: Contains summary of the alerted problem. + type: string + httpConfig: + description: The HTTP client's configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. 
If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + messageType: + description: Describes the behavior of the alert (CRITICAL, WARNING, INFO). + type: string + monitoringTool: + description: The monitoring tool the state message is from. + type: string + routingKey: + description: A key used to map the alert to a team. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + stateMessage: + description: Contains long explanation of the alerted problem. + type: string + type: object + type: array + webexConfigs: + description: List of Webex configurations. + items: + description: WebexConfig configures notification via Cisco Webex See https://prometheus.io/docs/alerting/latest/configuration/#webex_config + properties: + apiURL: + description: The Webex Teams API URL i.e. https://webexapis.com/v1/messages Provide if different from the default API URL. + pattern: ^https?://.+$ + type: string + httpConfig: + description: The HTTP client's configuration. You must supply the bot token via the `httpConfig.authorization` field. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: Message template + type: string + roomID: + description: ID of the Webex Teams room where to send the messages. + minLength: 1 + type: string + sendResolved: + description: Whether to notify about resolved alerts. + type: boolean + required: + - roomID + type: object + type: array + webhookConfigs: + description: List of webhook configurations. + items: + description: WebhookConfig configures notifications via a generic receiver supporting the webhook payload. See https://prometheus.io/docs/alerting/latest/configuration/#webhook_config + properties: + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + maxAlerts: + description: Maximum number of alerts to be sent per webhook message. When 0, all alerts are included. + format: int32 + minimum: 0 + type: integer + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + url: + description: The URL to send HTTP POST requests to. `urlSecret` takes precedence over `url`. One of `urlSecret` and `url` should be defined. + type: string + urlSecret: + description: The secret's key that contains the webhook URL to send HTTP requests to. `urlSecret` takes precedence over `url`. One of `urlSecret` and `url` should be defined. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + wechatConfigs: + description: List of WeChat configurations. + items: + description: WeChatConfig configures notifications via WeChat. See https://prometheus.io/docs/alerting/latest/configuration/#wechat_config + properties: + agentID: + type: string + apiSecret: + description: The secret's key that contains the WeChat API key. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + apiURL: + description: The WeChat API URL. + type: string + corpID: + description: The corp id for authentication. + type: string + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. 
The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: API request data as defined by the WeChat API. + type: string + messageType: + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + toParty: + type: string + toTag: + type: string + toUser: + type: string + type: object + type: array + required: + - name + type: object + type: array + route: + description: The Alertmanager route definition for alerts matching the resource's namespace. If present, it will be added to the generated Alertmanager configuration as a first-level route. + properties: + activeTimeIntervals: + description: ActiveTimeIntervals is a list of MuteTimeInterval names when this route should be active. + items: + type: string + type: array + continue: + description: Boolean indicating whether an alert should continue matching subsequent sibling nodes. It will always be overridden to true for the first-level route by the Prometheus operator. + type: boolean + groupBy: + description: List of labels to group by. 
Labels must not be repeated (unique list). Special label "..." (aggregate by all possible labels), if provided, must be the only element in the list. + items: + type: string + type: array + groupInterval: + description: 'How long to wait before sending an updated notification. Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` Example: "5m"' + type: string + groupWait: + description: 'How long to wait before sending the initial notification. Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` Example: "30s"' + type: string + matchers: + description: 'List of matchers that the alert''s labels should match. For the first level route, the operator removes any existing equality and regexp matcher on the `namespace` label and adds a `namespace: ` matcher.' + items: + description: Matcher defines how to match on alert's labels. + properties: + matchType: + description: Match operation available with AlertManager >= v0.22.0 and takes precedence over Regex (deprecated) if non-empty. + enum: + - '!=' + - = + - =~ + - '!~' + type: string + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: 'Whether to match on equality (false) or regular-expression (true). Deprecated: for AlertManager >= v0.22.0, `matchType` should be used instead.' + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + muteTimeIntervals: + description: 'Note: this comment applies to the field definition above but appears below otherwise it gets included in the generated manifest. CRD schema doesn''t support self-referential types for now (see https://github.com/kubernetes/kubernetes/issues/62872). We have to use an alternative type to circumvent the limitation. 
The downside is that the Kube API can''t validate the data beyond the fact that it is a valid JSON representation. MuteTimeIntervals is a list of MuteTimeInterval names that will mute this route when matched,' + items: + type: string + type: array + receiver: + description: Name of the receiver for this route. If not empty, it should be listed in the `receivers` field. + type: string + repeatInterval: + description: 'How long to wait before repeating the last notification. Must match the regular expression`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$` Example: "4h"' + type: string + routes: + description: Child routes. + items: + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/test/integration/framework/assets/0alertmanagerCustomResourceDefinition.yaml b/test/integration/framework/assets/0alertmanagerCustomResourceDefinition.yaml new file mode 100644 index 0000000000..481dc89327 --- /dev/null +++ b/test/integration/framework/assets/0alertmanagerCustomResourceDefinition.yaml @@ -0,0 +1,4648 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.70.0 + name: alertmanagers.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Alertmanager + listKind: AlertmanagerList + plural: alertmanagers + shortNames: + - am + singular: alertmanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The version of Alertmanager + jsonPath: .spec.version + name: Version + type: string + - description: The number of desired replicas + jsonPath: .spec.replicas + name: Replicas + type: integer + - description: The number of ready replicas + jsonPath: .status.availableReplicas + name: Ready + type: integer + - 
jsonPath: .status.conditions[?(@.type == 'Reconciled')].status + name: Reconciled + type: string + - jsonPath: .status.conditions[?(@.type == 'Available')].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Whether the resource reconciliation is paused or not + jsonPath: .status.paused + name: Paused + priority: 1 + type: boolean + name: v1 + schema: + openAPIV3Schema: + description: Alertmanager describes an Alertmanager cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Alertmanager cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalPeers: + description: AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. + items: + type: string + type: array + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alertmanagerConfigMatcherStrategy: + description: The AlertmanagerConfigMatcherStrategy defines how AlertmanagerConfig objects match the alerts. In the future more options may be added. + properties: + type: + default: OnNamespace + description: If set to `OnNamespace`, the operator injects a label matcher matching the namespace of the AlertmanagerConfig object for all its routes and inhibition rules. `None` will not add any additional matchers other than the ones specified in the AlertmanagerConfig. Default is `OnNamespace`. + enum: + - OnNamespace + - None + type: string + type: object + alertmanagerConfigNamespaceSelector: + description: Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + alertmanagerConfigSelector: + description: AlertmanagerConfigs to be selected for to merge and configure Alertmanager with. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + alertmanagerConfiguration: + description: 'EXPERIMENTAL: alertmanagerConfiguration specifies the configuration of Alertmanager. If defined, it takes precedence over the `configSecret` field. This field may change in future releases.' + properties: + global: + description: Defines the global parameters of the Alertmanager configuration. + properties: + httpConfig: + description: HTTP client configuration. + properties: + authorization: + description: Authorization header configuration for the client. This is mutually exclusive with BasicAuth and is only available starting from Alertmanager v0.22+. + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. 
\n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: BasicAuth for the client. This is mutually exclusive with Authorization. If both are defined, BasicAuth takes precedence. + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the Alertmanager object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + followRedirects: + description: FollowRedirects specifies whether the client should follow HTTP 3xx redirects. + type: boolean + oauth2: + description: OAuth2 client credentials used to fetch a token for the targets. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + opsGenieApiKey: + description: The default OpsGenie API Key. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + opsGenieApiUrl: + description: The default OpsGenie API URL. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + pagerdutyUrl: + description: The default Pagerduty URL. + type: string + resolveTimeout: + description: ResolveTimeout is the default value used by alertmanager if the alert does not include EndsAt, after this time passes it can declare the alert as resolved if it has not been updated. This has no impact on alerts from Prometheus, as they always include EndsAt. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + slackApiUrl: + description: The default Slack API URL. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + smtp: + description: Configures global SMTP parameters. + properties: + authIdentity: + description: SMTP Auth using PLAIN + type: string + authPassword: + description: SMTP Auth using LOGIN and PLAIN. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + authSecret: + description: SMTP Auth using CRAM-MD5. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + authUsername: + description: SMTP Auth using CRAM-MD5, LOGIN and PLAIN. If empty, Alertmanager doesn't authenticate to the SMTP server. + type: string + from: + description: The default SMTP From header field. 
+ type: string + hello: + description: The default hostname to identify to the SMTP server. + type: string + requireTLS: + description: The default SMTP TLS requirement. Note that Go does not support unencrypted connections to remote SMTP endpoints. + type: boolean + smartHost: + description: The default SMTP smarthost used for sending emails. + properties: + host: + description: Defines the host's address, it can be a DNS name or a literal IP address. + minLength: 1 + type: string + port: + description: Defines the host's port, it can be a literal port number or a port name. + minLength: 1 + type: string + required: + - host + - port + type: object + type: object + type: object + name: + description: The name of the AlertmanagerConfig resource which is used to generate the Alertmanager configuration. It must be defined in the same namespace as the Alertmanager object. The operator will not enforce a `namespace` label for routes and inhibition rules. + minLength: 1 + type: string + templates: + description: Custom notification templates. + items: + description: SecretOrConfigMap allows to specify data as a Secret or ConfigMap. Fields are mutually exclusive. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + type: object + automountServiceAccountToken: + description: 'AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod. If the service account has `automountServiceAccountToken: true`, set the field to `false` to opt out of automounting API credentials.' + type: boolean + baseImage: + description: 'Base image that is used to deploy pods, without tag. Deprecated: use ''image'' instead.' + type: string + clusterAdvertiseAddress: + description: 'ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918' + type: string + clusterGossipInterval: + description: Interval between gossip attempts. + pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + clusterLabel: + description: Defines the identifier that uniquely identifies the Alertmanager cluster. You should only set it when the Alertmanager cluster includes Alertmanager instances which are external to this Alertmanager resource. In practice, the addresses of the external instances are provided via the `.spec.additionalPeers` field. + type: string + clusterPeerTimeout: + description: Timeout for cluster peering. + pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + clusterPushpullInterval: + description: Interval between pushpull attempts. 
+ pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. Each ConfigMap is added to the StatefulSet definition as a volume named `configmap-`. The ConfigMaps are mounted into `/etc/alertmanager/configmaps/` in the 'alertmanager' container. + items: + type: string + type: array + configSecret: + description: "ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains the configuration for this Alertmanager instance. If empty, it defaults to `alertmanager-`. \n The Alertmanager configuration should be available under the `alertmanager.yaml` key. Additional keys from the original secret are copied to the generated secret and mounted into the `/etc/alertmanager/config` directory in the `alertmanager` container. \n If either the secret or the `alertmanager.yaml` key is missing, the operator provisions a minimal Alertmanager configuration with one empty receiver (effectively dropping alert notifications)." + type: string + containers: + description: 'Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. Containers described here modify an operator generated container if they share the same name and modifications are done via a strategic merge patch. The current container names are: `alertmanager` and `config-reloader`. Overriding containers is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. 
Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. 
Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. 
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of individual containers in a pod. 
This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod''s restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. 
Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + externalUrl: + description: The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. + type: string + forceEnableClusterMode: + description: ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. + type: boolean + hostAliases: + description: Pods' hostAliases configuration + items: + description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. 
+ properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + required: + - hostnames + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + image: + description: Image if specified has precedence over baseImage, tag and sha combinations. Specifying the version is still necessary to ensure the Prometheus Operator knows what version of Alertmanager is being configured. + type: string + imagePullPolicy: + description: Image pull policy for the 'alertmanager', 'init-config-reloader' and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details. + enum: + - "" + - Always + - Never + - IfNotPresent + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace to use for pulling prometheus and alertmanager images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. fetch secrets for injection into the Alertmanager configuration from external sources. Any errors during the execution of an initContainer will lead to a restart of the Pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ InitContainers described here modify an operator generated init containers if they share the same name and modifications are done via a strategic merge patch. The current init container name is: `init-config-reloader`. Overriding init containers is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. 
When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. 
+ properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. 
Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. 
Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. 
+ type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. 
Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. 
+ format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." 
+ type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. 
+ type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod''s restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' 
+ type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. 
May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. 
+ type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. 
If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. Note this is only for the Alertmanager UI, not the gossip communication. + type: boolean + logFormat: + description: Log format for Alertmanager to be configured with. + enum: + - "" + - logfmt + - json + type: string + logLevel: + description: Log level for Alertmanager to be configured with. + enum: + - "" + - debug + - info + - warn + - error + type: string + minReadySeconds: + description: Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field from kubernetes 1.22 until 1.24 which requires enabling the StatefulSetMinReadySeconds feature gate. + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + paused: + description: If set to true all actions on the underlying managed objects are not goint to be performed, except for delete actions. + type: boolean + podMetadata: + description: "PodMetadata configures labels and annotations which are propagated to the Alertmanager pods. \n The following items are reserved and cannot be overridden: * \"alertmanager\" label, set to the name of the Alertmanager instance. * \"app.kubernetes.io/instance\" label, set to the name of the Alertmanager instance. * \"app.kubernetes.io/managed-by\" label, set to \"prometheus-operator\". * \"app.kubernetes.io/name\" label, set to \"alertmanager\". * \"app.kubernetes.io/version\" label, set to the Alertmanager version. * \"kubectl.kubernetes.io/default-container\" annotation, set to \"alertmanager\"." + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. 
Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + portName: + default: web + description: Port name used for the pods and governing service. Defaults to `web`. + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + replicas: + description: Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the running cluster equal to the expected size. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + retention: + default: 120h + description: Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). + pattern: ^(0|(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + routePrefix: + description: The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. Each Secret is added to the StatefulSet definition as a volume named `secret-`. The Secrets are mounted into `/etc/alertmanager/secrets/` in the 'alertmanager' container. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. 
Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. Note that this field cannot be set when spec.os.name is windows.' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." 
+ type: string + required: + - type + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. 
+ type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to use to run the Prometheus Pods. + type: string + sha: + description: 'SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. Similar to a tag, but the SHA explicitly deploys an immutable container image. Version and Tag are ignored if SHA is set. Deprecated: use ''image'' instead. The image digest can be specified as part of the image URL.' + type: string + storage: + description: Storage is the definition of how storage will be used by the Alertmanager instances. + properties: + disableMountSubPath: + description: 'Deprecated: subPath usage will be removed in a future release.' + type: boolean + emptyDir: + description: 'EmptyDirVolumeSource to be used by the StatefulSet. If specified, it takes precedence over `ephemeral` and `volumeClaimTemplate`. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: 'EphemeralVolumeSource to be used by the StatefulSet. This is a beta field in k8s 1.21 and GA in 1.15. For lower versions, starting with k8s 1.19, it requires enabling the GenericEphemeralVolume feature gate. More info: https://kubernetes.io/docs/concepts/storage/ephemeral-volumes/#generic-ephemeral-volumes' + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. 
Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. \n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. 
+ type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + volumeClaimTemplate: + description: Defines the PVC spec to be used by the Prometheus StatefulSets. The easiest way to use a volume that cannot be automatically provisioned is to use a label selector alongside manually created PersistentVolumes. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: EmbeddedMetadata contains metadata relevant to an EmbeddedResource. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + spec: + description: 'Defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. 
For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. * While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. 
If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. 
+ type: string + type: object + status: + description: 'Deprecated: this field is never set.' + properties: + accessModes: + description: 'accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + allocatedResourceStatuses: + additionalProperties: + description: When a controller receives persistentvolume claim update with ClaimResourceStatus for a resource that it does not recognizes, then it should ignore that update and let other controllers handle it. + type: string + description: "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either: * Un-prefixed keys: - storage - the capacity of the volume. * Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\" Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used. \n ClaimResourceStatus can be in any of following states: - ControllerResizeInProgress: State set when resize controller starts resizing the volume in control-plane. - ControllerResizeFailed: State set when resize has failed in resize controller with a terminal error. - NodeResizePending: State set when resize controller has finished resizing the volume but further resizing of volume is needed on the node. - NodeResizeInProgress: State set when kubelet starts resizing the volume. - NodeResizeFailed: State set when resizing has failed in kubelet with a terminal error. Transient errors don't set NodeResizeFailed. 
For example: if expanding a PVC for more capacity - this field can be one of the following states: - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\" - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\" - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\" - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\" When this field is not set, it means that no resize operation is in progress for the given PVC. \n A controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC. \n This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature." + type: object + x-kubernetes-map-type: granular + allocatedResources: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either: * Un-prefixed keys: - storage - the capacity of the volume. * Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\" Apart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used. \n Capacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. 
If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity. \n A controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC. \n This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature." + type: object + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: capacity represents the actual resources of the underlying volume. + type: object + conditions: + description: conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contains details about state of pvc + properties: + lastProbeTime: + description: lastProbeTime is the time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: lastTransitionTime is the time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: message is the human-readable message indicating details about last transition. + type: string + reason: + description: reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. 
+ type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: object + tag: + description: 'Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. Version is ignored if Tag is set. Deprecated: use ''image'' instead. The image tag can be specified as part of the image URL.' + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: If specified, the pod's topology spread constraints. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. 
When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. 
Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. 
It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + version: + description: Version the cluster should be on. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container, that are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will be appended to other volumes that are generated as a result of StorageSpec objects. 
+ items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' 
+ type: string + diskName: + description: diskName is the Name of the data disk in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the blob storage + type: string + fsType: + description: fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: 'volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). + properties: + driver: + description: driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: readOnly specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: volumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'emptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'medium represents what type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: "ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. \n Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity tracking are needed, c) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through a PersistentVolumeClaim (see EphemeralVolumeSource for more information on the connection between this volume type and PersistentVolumeClaim). \n Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. \n Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. \n A pod can use both types of ephemeral volumes and persistent volumes at the same time." + properties: + volumeClaimTemplate: + description: "Will be used to create a stand-alone PVC to provision the volume. 
The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). \n An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster. \n This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. \n Required, must not be nil." + properties: + metadata: + description: May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation. + type: object + spec: + description: The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here. + properties: + accessModes: + description: 'accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. 
When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: 'dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn''t specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn''t set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef allows any non-core object, as well as PersistentVolumeClaim objects. 
* While dataSource ignores disallowed values (dropping them), dataSourceRef preserves all values, and generates an error if a disallowed value is specified. * While dataSource only allows local objects, dataSourceRef allows objects in any namespaces. (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + namespace: + description: Namespace is the namespace of resource being referenced Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: 'resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. 
\n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + selector: + description: selector is a label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. 
+ type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: 'storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'wwids Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use for this volume. + type: string + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds extra command options if any.' + type: object + readOnly: + description: 'readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'secretRef is Optional: secretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + properties: + datasetName: + description: datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'fsType is filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'iscsi represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'nfs represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: readOnly Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + pdID: + description: pdID is the ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: configMap information about the configMap data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. 
If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: 'Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + type: object + secret: + description: secret information about the secret data to project + properties: + items: + description: items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. 
May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: optional field specify whether the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: path is the path relative to the mount point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: group to map volume access to Default is no group + type: string + readOnly: + description: readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. 
+ type: boolean + registry: + description: registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: user to map volume access to Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'rbd represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'pool is the rados pool name. Default is rbd. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: 'user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: gateway is the host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: optional field specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'secretName is the name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: secretRef specifies the secret to use for obtaining the StorageOS API credentials. 
If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + web: + description: Defines the web command line flags when starting Alertmanager. + properties: + getConcurrency: + description: Maximum number of GET requests processed concurrently. 
This corresponds to the Alertmanager's `--web.get-concurrency` flag. + format: int32 + type: integer + httpConfig: + description: Defines HTTP parameters for web server. + properties: + headers: + description: List of headers that can be added to HTTP responses. + properties: + contentSecurityPolicy: + description: Set the Content-Security-Policy header to HTTP responses. Unset if blank. + type: string + strictTransportSecurity: + description: Set the Strict-Transport-Security header to HTTP responses. Unset if blank. Please make sure that you use this with care as this header might force browsers to load Prometheus and the other applications hosted on the same domain and subdomains over HTTPS. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security + type: string + xContentTypeOptions: + description: Set the X-Content-Type-Options header to HTTP responses. Unset if blank. Accepted value is nosniff. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Content-Type-Options + enum: + - "" + - NoSniff + type: string + xFrameOptions: + description: Set the X-Frame-Options header to HTTP responses. Unset if blank. Accepted values are deny and sameorigin. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Frame-Options + enum: + - "" + - Deny + - SameOrigin + type: string + xXSSProtection: + description: Set the X-XSS-Protection header to all responses. Unset if blank. https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-XSS-Protection + type: string + type: object + http2: + description: Enable HTTP/2 support. Note that HTTP/2 is only supported with TLS. When TLSConfig is not configured, HTTP/2 will be disabled. Whenever the value of the field changes, a rolling update will be triggered. + type: boolean + type: object + timeout: + description: Timeout for HTTP requests. This corresponds to the Alertmanager's `--web.timeout` flag. 
+ format: int32 + type: integer + tlsConfig: + description: Defines the TLS parameters for HTTPS. + properties: + cert: + description: Contains the TLS certificate for the server. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cipherSuites: + description: 'List of supported cipher suites for TLS versions up to TLS 1.2. If empty, Go default cipher suites are used. Available cipher suites are documented in the go documentation: https://golang.org/pkg/crypto/tls/#pkg-constants' + items: + type: string + type: array + client_ca: + description: Contains the CA certificate for client certificate authentication to the server. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientAuthType: + description: 'Server policy for client authentication. Maps to ClientAuth Policies. For more detail on clientAuth options: https://golang.org/pkg/crypto/tls/#ClientAuthType' + type: string + curvePreferences: + description: 'Elliptic curves that will be used in an ECDHE handshake, in preference order. Available curves are documented in the go documentation: https://golang.org/pkg/crypto/tls/#CurveID' + items: + type: string + type: array + keySecret: + description: Secret containing the TLS key for the server. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + maxVersion: + description: Maximum TLS version that is acceptable. 
Defaults to TLS13. + type: string + minVersion: + description: Minimum TLS version that is acceptable. Defaults to TLS12. + type: string + preferServerCipherSuites: + description: Controls whether the server selects the client's most preferred cipher suite, or the server's most preferred cipher suite. If true then the server's preference, as expressed in the order of elements in cipherSuites, is used. + type: boolean + required: + - cert + - keySecret + type: object + type: object + type: object + status: + description: 'Most recent observed status of the Alertmanager cluster. Read-only. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) targeted by this Alertmanager cluster. + format: int32 + type: integer + conditions: + description: The current state of the Alertmanager object. + items: + description: Condition represents the state of the resources associated with the Prometheus, Alertmanager or ThanosRuler resource. + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update to the current status property. + format: date-time + type: string + message: + description: Human-readable message indicating details for the condition's last transition. + type: string + observedGeneration: + description: ObservedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if `.metadata.generation` is currently 12, but the `.status.conditions[].observedGeneration` is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + type: integer + reason: + description: Reason for the condition's last transition. + type: string + status: + description: Status of the condition. + type: string + type: + description: Type of the condition being reported. 
+ type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + paused: + description: Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this Alertmanager object (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Alertmanager object. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this Alertmanager object that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/test/integration/framework/assets/0podmonitorCustomResourceDefinition.yaml b/test/integration/framework/assets/0podmonitorCustomResourceDefinition.yaml new file mode 100644 index 0000000000..35c10c1bc6 --- /dev/null +++ b/test/integration/framework/assets/0podmonitorCustomResourceDefinition.yaml @@ -0,0 +1,548 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.70.0 + name: podmonitors.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: PodMonitor + listKind: PodMonitorList + plural: podmonitors + shortNames: + - pmon + singular: podmonitor + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PodMonitor defines monitoring for a set of pods. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Pod selection for target discovery by Prometheus. + properties: + attachMetadata: + description: "`attachMetadata` defines additional metadata which is added to the discovered targets. \n It requires Prometheus >= v2.37.0." + properties: + node: + description: When set to true, Prometheus must have the `get` permission on the `Nodes` objects. + type: boolean + type: object + jobLabel: + description: "The label to use to retrieve the job name from. `jobLabel` selects the label from the associated Kubernetes `Pod` object which will be used as the `job` label for all metrics. \n For example if `jobLabel` is set to `foo` and the Kubernetes `Pod` object is labeled with `foo: bar`, then Prometheus adds the `job=\"bar\"` label to all ingested metrics. \n If the value of this field is empty, the `job` label of the metrics defaults to the namespace and name of the PodMonitor object (e.g. `/`)." + type: string + keepDroppedTargets: + description: "Per-scrape limit on the number of targets dropped by relabeling that will be kept in memory. 0 means no limit. \n It requires Prometheus >= v2.47.0." + format: int64 + type: integer + labelLimit: + description: "Per-scrape limit on number of labels that will be accepted for a sample. 
\n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + labelNameLengthLimit: + description: "Per-scrape limit on length of labels name that will be accepted for a sample. \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + labelValueLengthLimit: + description: "Per-scrape limit on length of labels value that will be accepted for a sample. \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + namespaceSelector: + description: Selector to select which namespaces the Kubernetes `Pods` objects are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names to select from. + items: + type: string + type: array + type: object + podMetricsEndpoints: + description: List of endpoints part of this PodMonitor. + items: + description: PodMetricsEndpoint defines an endpoint serving Prometheus metrics to be scraped by Prometheus. + properties: + authorization: + description: "`authorization` configures the Authorization header credentials to use when scraping the target. \n Cannot be set at the same time as `basicAuth`, or `oauth2`." + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. 
\n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: "`basicAuth` configures the Basic Authentication credentials to use when scraping the target. \n Cannot be set at the same time as `authorization`, or `oauth2`." + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: "`bearerTokenSecret` specifies a key of a Secret containing the bearer token for scraping targets. The secret needs to be in the same namespace as the PodMonitor object and readable by the Prometheus Operator. \n Deprecated: use `authorization` instead." + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + enableHttp2: + description: '`enableHttp2` can be used to disable HTTP2 when scraping the target.' + type: boolean + filterRunning: + description: "When true, the pods which are not running (e.g. either in Failed or Succeeded state) are dropped during the target discovery. \n If unset, the filtering is enabled. \n More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase" + type: boolean + followRedirects: + description: '`followRedirects` defines whether the scrape requests should follow HTTP 3xx redirects.' + type: boolean + honorLabels: + description: When true, `honorLabels` preserves the metric's labels when they collide with the target's labels. + type: boolean + honorTimestamps: + description: '`honorTimestamps` controls whether Prometheus preserves the timestamps when exposed by the target.' + type: boolean + interval: + description: "Interval at which Prometheus scrapes the metrics from the target. \n If empty, Prometheus uses the global scrape interval." + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + metricRelabelings: + description: '`metricRelabelings` configures the relabeling rules to apply to the samples before ingestion.' + items: + description: "RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. 
\n `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source label values. \n Only applicable when the action is `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. + type: string + replacement: + description: "Replacement value against which a Replace action is performed if the regular expression matches. \n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured Separator and matched against the configured regular expression. + items: + description: LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written in a replacement. \n It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + oauth2: + description: "`oauth2` configures the OAuth2 settings to use when scraping the target. \n It requires Prometheus >= 2.27.0. \n Cannot be set at the same time as `authorization`, or `basicAuth`." 
+ properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' 
+ type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + params: + additionalProperties: + items: + type: string + type: array + description: '`params` define optional HTTP URL parameters.' + type: object + path: + description: "HTTP path from which to scrape for metrics. \n If empty, Prometheus uses the default value (e.g. `/metrics`)." + type: string + port: + description: "Name of the Pod port which this endpoint refers to. \n It takes precedence over `targetPort`." + type: string + proxyUrl: + description: '`proxyURL` configures the HTTP Proxy URL (e.g. "http://proxyserver:2195") to go through when scraping the target.' + type: string + relabelings: + description: "`relabelings` configures the relabeling rules to apply the target's metadata labels. \n The Operator automatically adds relabelings for a few standard Kubernetes fields. \n The original scrape job's name is available via the `__tmp_prometheus_job_name` label. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + items: + description: "RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. \n `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. 
\n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source label values. \n Only applicable when the action is `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. + type: string + replacement: + description: "Replacement value against which a Replace action is performed if the regular expression matches. \n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured Separator and matched against the configured regular expression. + items: + description: LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written in a replacement. \n It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + scheme: + description: "HTTP scheme to use for scraping. \n `http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. \n If empty, Prometheus uses the default value `http`." + enum: + - http + - https + type: string + scrapeTimeout: + description: "Timeout after which Prometheus considers the scrape to be failed. 
\n If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value in which the latter is used." + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: "Name or number of the target port of the `Pod` object behind the Service, the port must be specified with container port property. \n Deprecated: use 'port' instead." + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the target. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. 
+ properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. 
+ type: string + type: object + trackTimestampsStaleness: + description: "`trackTimestampsStaleness` defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false. \n It requires Prometheus >= v2.48.0." + type: boolean + type: object + type: array + podTargetLabels: + description: '`podTargetLabels` defines the labels which are transferred from the associated Kubernetes `Pod` object onto the ingested metrics.' + items: + type: string + type: array + sampleLimit: + description: '`sampleLimit` defines a per-scrape limit on the number of scraped samples that will be accepted.' + format: int64 + type: integer + selector: + description: Label selector to select the Kubernetes `Pod` objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + targetLimit: + description: '`targetLimit` defines a limit on the number of scraped targets that will be accepted.' + format: int64 + type: integer + required: + - selector + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/test/integration/framework/assets/0probeCustomResourceDefinition.yaml b/test/integration/framework/assets/0probeCustomResourceDefinition.yaml new file mode 100644 index 0000000000..f944a75e6b --- /dev/null +++ b/test/integration/framework/assets/0probeCustomResourceDefinition.yaml @@ -0,0 +1,585 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.70.0 + name: probes.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Probe + listKind: ProbeList + plural: probes + shortNames: + - prb + singular: probe + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Probe defines monitoring for a set of static targets or ingresses. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Ingress selection for target discovery by Prometheus. + properties: + authorization: + description: Authorization section for this endpoint + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over basic authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping targets. The secret needs to be in the same namespace as the probe and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + interval: + description: Interval at which targets are probed using the configured prober. If not specified Prometheus' global scrape interval is used. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + jobName: + description: The job name assigned to scraped metrics by default. + type: string + keepDroppedTargets: + description: "Per-scrape limit on the number of targets dropped by relabeling that will be kept in memory. 0 means no limit. 
\n It requires Prometheus >= v2.47.0." + format: int64 + type: integer + labelLimit: + description: Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + labelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + labelValueLengthLimit: + description: Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + format: int64 + type: integer + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: "RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. \n `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source label values. \n Only applicable when the action is `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. + type: string + replacement: + description: "Replacement value against which a Replace action is performed if the regular expression matches. 
\n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured Separator and matched against the configured regular expression. + items: + description: LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written in a replacement. \n It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + module: + description: 'The module to use for probing specifying how to probe the target. Example module configuring in the blackbox exporter: https://github.com/prometheus/blackbox_exporter/blob/master/example.yml' + type: string + oauth2: + description: OAuth2 for the URL. Only valid in Prometheus versions 2.27.0 and newer. + properties: + clientId: + description: '`clientId` specifies a key of a Secret or ConfigMap containing the OAuth2 client''s ID.' + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. 
Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + clientSecret: + description: '`clientSecret` specifies a key of a Secret containing the OAuth2 client''s secret.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + endpointParams: + additionalProperties: + type: string + description: '`endpointParams` configures the HTTP parameters to append to the token URL.' + type: object + scopes: + description: '`scopes` defines the OAuth2 scopes used for the token request.' + items: + type: string + type: array + tokenUrl: + description: '`tokenURL` configures the URL to fetch the token from.' + minLength: 1 + type: string + required: + - clientId + - clientSecret + - tokenUrl + type: object + prober: + description: Specification for the prober to use for probing targets. The prober.URL parameter is required. Targets cannot be probed if left empty. + properties: + path: + default: /probe + description: Path to collect metrics from. Defaults to `/probe`. + type: string + proxyUrl: + description: Optional ProxyURL. + type: string + scheme: + description: HTTP scheme to use for scraping. 
`http` and `https` are the expected values unless you rewrite the `__scheme__` label via relabeling. If empty, Prometheus uses the default value `http`. + enum: + - http + - https + type: string + url: + description: Mandatory URL of the prober. + type: string + required: + - url + type: object + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + format: int64 + type: integer + scrapeTimeout: + description: Timeout for scraping metrics from the Prometheus exporter. If not specified, the Prometheus global scrape timeout is used. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + targetLimit: + description: TargetLimit defines a limit on the number of scraped targets that will be accepted. + format: int64 + type: integer + targets: + description: Targets defines a set of static or dynamically discovered targets to probe. + properties: + ingress: + description: ingress defines the Ingress objects to probe and the relabeling configuration. If `staticConfig` is also defined, `staticConfig` takes precedence. + properties: + namespaceSelector: + description: From which namespaces to select Ingress objects. + properties: + any: + description: Boolean describing whether all namespaces are selected in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names to select from. + items: + type: string + type: array + type: object + relabelingConfigs: + description: 'RelabelConfigs to apply to the label set of the target before it gets scraped. The original ingress address is available via the `__tmp_prometheus_ingress_address` label. It can be used to customize the probed URL. The original scrape job''s name is available via the `__tmp_prometheus_job_name` label. 
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: "RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. \n `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source label values. \n Only applicable when the action is `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. + type: string + replacement: + description: "Replacement value against which a Replace action is performed if the regular expression matches. \n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured Separator and matched against the configured regular expression. + items: + description: LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, as well as underscores. + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written in a replacement. 
\n It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + selector: + description: Selector to select the Ingress objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + type: object + staticConfig: + description: 'staticConfig defines the static list of targets to probe and the relabeling configuration. If `ingress` is also defined, `staticConfig` takes precedence. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.' + properties: + labels: + additionalProperties: + type: string + description: Labels assigned to all metrics scraped from the targets. 
+ type: object + relabelingConfigs: + description: 'RelabelConfigs to apply to the label set of the targets before it gets scraped. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: "RelabelConfig allows dynamic rewriting of the label set for targets, alerts, scraped samples and remote write samples. \n More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config" + properties: + action: + default: replace + description: "Action to perform based on the regex matching. \n `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0. `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0. \n Default: \"Replace\"" + enum: + - replace + - Replace + - keep + - Keep + - drop + - Drop + - hashmod + - HashMod + - labelmap + - LabelMap + - labeldrop + - LabelDrop + - labelkeep + - LabelKeep + - lowercase + - Lowercase + - uppercase + - Uppercase + - keepequal + - KeepEqual + - dropequal + - DropEqual + type: string + modulus: + description: "Modulus to take of the hash of the source label values. \n Only applicable when the action is `HashMod`." + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. + type: string + replacement: + description: "Replacement value against which a Replace action is performed if the regular expression matches. \n Regex capture groups are available." + type: string + separator: + description: Separator is the string between concatenated SourceLabels. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured Separator and matched against the configured regular expression. + items: + description: LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, as well as underscores. 
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + type: string + type: array + targetLabel: + description: "Label to which the resulting string is written in a replacement. \n It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`, `KeepEqual` and `DropEqual` actions. \n Regex capture groups are available." + type: string + type: object + type: array + static: + description: The list of hosts to probe. + items: + type: string + type: array + type: object + type: object + tlsConfig: + description: TLS configuration to use when scraping the endpoint. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. 
+ type: string + type: object + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/test/integration/framework/assets/0prometheusCustomResourceDefinition.yaml b/test/integration/framework/assets/0prometheusCustomResourceDefinition.yaml new file mode 100644 index 0000000000..30f7182252 --- /dev/null +++ b/test/integration/framework/assets/0prometheusCustomResourceDefinition.yaml @@ -0,0 +1,6438 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.13.0 + operator.prometheus.io/version: 0.70.0 + name: prometheuses.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Prometheus + listKind: PrometheusList + plural: prometheuses + shortNames: + - prom + singular: prometheus + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The version of Prometheus + jsonPath: .spec.version + name: Version + type: string + - description: The number of desired replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: The number of ready replicas + jsonPath: .status.availableReplicas + name: Ready + type: integer + - jsonPath: .status.conditions[?(@.type == 'Reconciled')].status + name: Reconciled + type: string + - jsonPath: .status.conditions[?(@.type == 'Available')].status + name: Available + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Whether the resource reconciliation is paused or not + jsonPath: .status.paused + name: Paused + priority: 1 + type: boolean + name: v1 + schema: + openAPIV3Schema: + description: Prometheus defines a Prometheus deployment. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Prometheus cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalAlertManagerConfigs: + description: "AdditionalAlertManagerConfigs specifies a key of a Secret containing additional Prometheus Alertmanager configurations. The Alertmanager configurations are appended to the configuration generated by the Prometheus Operator. They must be formatted according to the official Prometheus documentation: \n https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config \n The user is responsible for making sure that the configurations are valid \n Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade." + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + additionalAlertRelabelConfigs: + description: "AdditionalAlertRelabelConfigs specifies a key of a Secret containing additional Prometheus alert relabel configurations. The alert relabel configurations are appended to the configuration generated by the Prometheus Operator. They must be formatted according to the official Prometheus documentation: \n https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs \n The user is responsible for making sure that the configurations are valid \n Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel configs are going to break Prometheus after the upgrade." + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + additionalArgs: + description: "AdditionalArgs allows setting additional arguments for the 'prometheus' container. \n It is intended for e.g. activating hidden flags which are not supported by the dedicated configuration options yet. The arguments are passed as-is to the Prometheus container which may cause issues if they are invalid or not supported by the given Prometheus version. \n In case of an argument conflict (e.g. 
an argument which is already set by the operator itself) or when providing an invalid argument, the reconciliation will fail and an error will be logged." + items: + description: Argument as part of the AdditionalArgs list. + properties: + name: + description: Name of the argument, e.g. "scrape.discovery-reload-interval". + minLength: 1 + type: string + value: + description: Argument value, e.g. 30s. Can be empty for name-only arguments (e.g. --storage.tsdb.no-lockfile) + type: string + required: + - name + type: object + type: array + additionalScrapeConfigs: + description: 'AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Prometheus scrape configurations. Scrape configurations specified are appended to the configurations generated by the Prometheus Operator. Job configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible scrape configs are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + affinity: + description: Defines the Pods' affinity scheduling rules if specified. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alerting: + description: Defines the settings related to Alertmanager. + properties: + alertmanagers: + description: AlertmanagerEndpoints Prometheus should fire alerts against. + items: + description: AlertmanagerEndpoints defines a selection of a single Endpoints object containing Alertmanager IPs to fire alerts against. + properties: + apiVersion: + description: Version of the Alertmanager API that Prometheus uses to send alerts. It can be "v1" or "v2". + type: string + authorization: + description: "Authorization section for Alertmanager. \n Cannot be set at the same time as `basicAuth`, `bearerTokenFile` or `sigv4`." + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: "BasicAuth configuration for Alertmanager. \n Cannot be set at the same time as `bearerTokenFile`, `authorization` or `sigv4`." + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerTokenFile: + description: "File to read bearer token for Alertmanager. \n Cannot be set at the same time as `basicAuth`, `authorization`, or `sigv4`. \n Deprecated: this will be removed in a future release. Prefer using `authorization`." + type: string + enableHttp2: + description: Whether to enable HTTP2. + type: boolean + name: + description: Name of the Endpoints object in the namespace. + type: string + namespace: + description: Namespace of the Endpoints object. + type: string + pathPrefix: + description: Prefix for the HTTP path alerts are pushed to. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port on which the Alertmanager API is exposed. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use when firing alerts. + type: string + sigv4: + description: "Sigv4 allows to configures AWS's Signature Verification 4 for the URL. \n It requires Prometheus >= v2.48.0. \n Cannot be set at the same time as `basicAuth`, `bearerTokenFile` or `authorization`." + properties: + accessKey: + description: AccessKey is the AWS API key. If not specified, the environment variable `AWS_ACCESS_KEY_ID` is used. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + profile: + description: Profile is the named AWS profile used to authenticate. 
+ type: string + region: + description: Region is the AWS region. If blank, the region from the default credentials chain used. + type: string + roleArn: + description: RoleArn is the named AWS profile used to authenticate. + type: string + secretKey: + description: SecretKey is the AWS API secret. If not specified, the environment variable `AWS_SECRET_ACCESS_KEY` is used. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + timeout: + description: Timeout is a per-target Alertmanager timeout when pushing alerts. + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + tlsConfig: + description: TLS Config to use for Alertmanager. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. 
+ type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + required: + - name + - namespace + - port + type: object + type: array + required: + - alertmanagers + type: object + allowOverlappingBlocks: + description: "AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. \n Deprecated: this flag has no effect for Prometheus >= 2.39.0 where overlapping blocks are enabled by default." + type: boolean + apiserverConfig: + description: 'APIServerConfig allows specifying a host and auth methods to access the Kuberntees API server. If null, Prometheus is assumed to run inside of the cluster: it will discover the API servers automatically and use the Pod''s CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.' + properties: + authorization: + description: "Authorization section for the API server. \n Cannot be set at the same time as `basicAuth`, `bearerToken`, or `bearerTokenFile`." + properties: + credentials: + description: Selects a key of a Secret in the namespace that contains the credentials for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + credentialsFile: + description: File to read a secret from, mutually exclusive with `credentials`. + type: string + type: + description: "Defines the authentication type. The value is case-insensitive. \n \"Basic\" is not a supported value. \n Default: \"Bearer\"" + type: string + type: object + basicAuth: + description: "BasicAuth configuration for the API server. \n Cannot be set at the same time as `authorization`, `bearerToken`, or `bearerTokenFile`." + properties: + password: + description: '`password` specifies a key of a Secret containing the password for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + username: + description: '`username` specifies a key of a Secret containing the username for authentication.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + bearerToken: + description: "*Warning: this field shouldn't be used because the token value appears in clear-text. Prefer using `authorization`.* \n Deprecated: this will be removed in a future release." + type: string + bearerTokenFile: + description: "File to read bearer token for accessing apiserver. \n Cannot be set at the same time as `basicAuth`, `authorization`, or `bearerToken`. \n Deprecated: this will be removed in a future release. Prefer using `authorization`." + type: string + host: + description: Kubernetes API address consisting of a hostname or IP address followed by an optional port number. + type: string + tlsConfig: + description: TLS Config to use for the API server. + properties: + ca: + description: Certificate authority used when verifying server certificates. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Client certificate to present when doing client-authentication. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. 
Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + required: + - host + type: object + arbitraryFSAccessThroughSMs: + description: When true, ServiceMonitor, PodMonitor and Probe object are forbidden to reference arbitrary files on the file system of the 'prometheus' container. When a ServiceMonitor's endpoint specifies a `bearerTokenFile` value (e.g. '/var/run/secrets/kubernetes.io/serviceaccount/token'), a malicious target can get access to the Prometheus service account's token in the Prometheus' scrape request. Setting `spec.arbitraryFSAccessThroughSM` to 'true' would prevent the attack. Users should instead provide the credentials using the `spec.bearerTokenSecret` field. + properties: + deny: + type: boolean + type: object + baseImage: + description: 'Deprecated: use ''spec.image'' instead.' + type: string + bodySizeLimit: + description: BodySizeLimit defines per-scrape on response body size. Only valid in Prometheus versions 2.45.0 and newer. + pattern: (^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$ + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. Each ConfigMap is added to the StatefulSet definition as a volume named `configmap-`. The ConfigMaps are mounted into /etc/prometheus/configmaps/ in the 'prometheus' container. 
+ items: + type: string + type: array + containers: + description: "Containers allows injecting additional containers or modifying operator generated containers. This can be used to allow adding an authentication proxy to the Pods or to change the behavior of an operator generated container. Containers described here modify an operator generated container if they share the same name and modifications are done via a strategic merge patch. \n The names of containers managed by the operator are: * `prometheus` * `config-reloader` * `thanos-sidecar` \n Overriding containers is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice." + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
"$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). 
Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. 
TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
\n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. 
For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. 
+ format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod''s restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. 
Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. 
The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. 
+ properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + disableCompaction: + description: When true, the Prometheus compaction is disabled. + type: boolean + enableAdminAPI: + description: "Enables access to the Prometheus web admin API. \n WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, shutdown Prometheus, and more. Enabling this should be done with care and the user is advised to add additional authentication authorization via a proxy to ensure only clients authorized to perform these actions can do so. \n For more information: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis" + type: boolean + enableFeatures: + description: "Enable access to Prometheus feature flags. By default, no features are enabled. \n Enabling features which are disabled by default is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice. \n For more information see https://prometheus.io/docs/prometheus/latest/feature_flags/" + items: + type: string + type: array + enableRemoteWriteReceiver: + description: "Enable Prometheus to be used as a receiver for the Prometheus remote write protocol. \n WARNING: This is not considered an efficient way of ingesting samples. Use it with caution for specific low-volume use cases. It is not suitable for replacing the ingestion via scraping and turning Prometheus into a push-based metrics collection system. 
For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#remote-write-receiver \n It requires Prometheus >= v2.33.0." + type: boolean + enforcedBodySizeLimit: + description: "When defined, enforcedBodySizeLimit specifies a global limit on the size of uncompressed response body that will be accepted by Prometheus. Targets responding with a body larger than this many bytes will cause the scrape to fail. \n It requires Prometheus >= v2.28.0." + pattern: (^0|([0-9]*[.])?[0-9]+((K|M|G|T|E|P)i?)?B)$ + type: string + enforcedKeepDroppedTargets: + description: "When defined, enforcedKeepDroppedTargets specifies a global limit on the number of targets dropped by relabeling that will be kept in memory. The value overrides any `spec.keepDroppedTargets` set by ServiceMonitor, PodMonitor, Probe objects unless `spec.keepDroppedTargets` is greater than zero and less than `spec.enforcedKeepDroppedTargets`. \n It requires Prometheus >= v2.47.0." + format: int64 + type: integer + enforcedLabelLimit: + description: "When defined, enforcedLabelLimit specifies a global limit on the number of labels per sample. The value overrides any `spec.labelLimit` set by ServiceMonitor, PodMonitor, Probe objects unless `spec.labelLimit` is greater than zero and less than `spec.enforcedLabelLimit`. \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + enforcedLabelNameLengthLimit: + description: "When defined, enforcedLabelNameLengthLimit specifies a global limit on the length of labels name per sample. The value overrides any `spec.labelNameLengthLimit` set by ServiceMonitor, PodMonitor, Probe objects unless `spec.labelNameLengthLimit` is greater than zero and less than `spec.enforcedLabelNameLengthLimit`. \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + enforcedLabelValueLengthLimit: + description: "When not null, enforcedLabelValueLengthLimit defines a global limit on the length of labels value per sample. 
The value overrides any `spec.labelValueLengthLimit` set by ServiceMonitor, PodMonitor, Probe objects unless `spec.labelValueLengthLimit` is greater than zero and less than `spec.enforcedLabelValueLengthLimit`. \n It requires Prometheus >= v2.27.0." + format: int64 + type: integer + enforcedNamespaceLabel: + description: "When not empty, a label will be added to \n 1. All metrics scraped from `ServiceMonitor`, `PodMonitor`, `Probe` and `ScrapeConfig` objects. 2. All metrics generated from recording rules defined in `PrometheusRule` objects. 3. All alerts generated from alerting rules defined in `PrometheusRule` objects. 4. All vector selectors of PromQL expressions defined in `PrometheusRule` objects. \n The label will not added for objects referenced in `spec.excludedFromEnforcement`. \n The label's name is this field's value. The label's value is the namespace of the `ServiceMonitor`, `PodMonitor`, `Probe` or `PrometheusRule` object." + type: string + enforcedSampleLimit: + description: "When defined, enforcedSampleLimit specifies a global limit on the number of scraped samples that will be accepted. This overrides any `spec.sampleLimit` set by ServiceMonitor, PodMonitor, Probe objects unless `spec.sampleLimit` is greater than zero and less than than `spec.enforcedSampleLimit`. \n It is meant to be used by admins to keep the overall number of samples/series under a desired limit." + format: int64 + type: integer + enforcedTargetLimit: + description: "When defined, enforcedTargetLimit specifies a global limit on the number of scraped targets. The value overrides any `spec.targetLimit` set by ServiceMonitor, PodMonitor, Probe objects unless `spec.targetLimit` is greater than zero and less than `spec.enforcedTargetLimit`. \n It is meant to be used by admins to to keep the overall number of targets under a desired limit." + format: int64 + type: integer + evaluationInterval: + default: 30s + description: 'Interval between rule evaluations. 
Default: "30s"' + pattern: ^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$ + type: string + excludedFromEnforcement: + description: "List of references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects to be excluded from enforcing a namespace label of origin. \n It is only applicable if `spec.enforcedNamespaceLabel` set to true." + items: + description: ObjectReference references a PodMonitor, ServiceMonitor, Probe or PrometheusRule object. + properties: + group: + default: monitoring.coreos.com + description: Group of the referent. When not specified, it defaults to `monitoring.coreos.com` + enum: + - monitoring.coreos.com + type: string + name: + description: Name of the referent. When not set, all resources in the namespace are matched. + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + minLength: 1 + type: string + resource: + description: Resource of the referent. + enum: + - prometheusrules + - servicemonitors + - podmonitors + - probes + - scrapeconfigs + type: string + required: + - namespace + - resource + type: object + type: array + exemplars: + description: Exemplars related settings that are runtime reloadable. It requires to enable the `exemplar-storage` feature flag to be effective. + properties: + maxSize: + description: "Maximum number of exemplars stored in memory for all series. \n exemplar-storage itself must be enabled using the `spec.enableFeature` option for exemplars to be scraped in the first place. \n If not set, Prometheus uses its default value. A value of zero or less than zero disables the storage." + format: int64 + type: integer + type: object + externalLabels: + additionalProperties: + type: string + description: The labels to add to any time series or alerts when communicating with external systems (federation, remote storage, Alertmanager). 
Labels defined by `spec.replicaExternalLabelName` and `spec.prometheusExternalLabelName` take precedence over this list. + type: object + externalUrl: + description: The external URL under which the Prometheus service is externally available. This is necessary to generate correct URLs (for instance if Prometheus is accessible behind an Ingress resource). + type: string + hostAliases: + description: Optional list of hosts and IPs that will be injected into the Pod's hosts file if specified. + items: + description: HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file. + properties: + hostnames: + description: Hostnames for the above IP address. + items: + type: string + type: array + ip: + description: IP address of the host file entry. + type: string + required: + - hostnames + - ip + type: object + type: array + x-kubernetes-list-map-keys: + - ip + x-kubernetes-list-type: map + hostNetwork: + description: "Use the host's network namespace if true. \n Make sure to understand the security implications if you want to enable it (https://kubernetes.io/docs/concepts/configuration/overview/). \n When hostNetwork is enabled, this will set the DNS policy to `ClusterFirstWithHostNet` automatically." + type: boolean + ignoreNamespaceSelectors: + description: When true, `spec.namespaceSelector` from all PodMonitor, ServiceMonitor and Probe objects will be ignored. They will only discover targets within the namespace of the PodMonitor, ServiceMonitor and Probe object. + type: boolean + image: + description: "Container image name for Prometheus. If specified, it takes precedence over the `spec.baseImage`, `spec.tag` and `spec.sha` fields. \n Specifying `spec.version` is still necessary to ensure the Prometheus Operator knows which version of Prometheus is being configured. 
\n If neither `spec.image` nor `spec.baseImage` are defined, the operator will use the latest upstream version of Prometheus available at the time when the operator was released." + type: string + imagePullPolicy: + description: Image pull policy for the 'prometheus', 'init-config-reloader' and 'config-reloader' containers. See https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy for more details. + enum: + - "" + - Always + - Never + - IfNotPresent + type: string + imagePullSecrets: + description: An optional list of references to Secrets in the same namespace to use for pulling images from registries. See http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + initContainers: + description: "InitContainers allows injecting initContainers to the Pod definition. Those can be used to e.g. fetch secrets for injection into the Prometheus configuration from external sources. Any errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ InitContainers described here modify an operator generated init containers if they share the same name and modifications are done via a strategic merge patch. \n The names of init container name managed by the operator are: * `init-config-reloader`. \n Overriding init containers is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice." 
+ items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The container image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod''s termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). 
This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". 
+ type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. 
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resizePolicy: + description: Resources resize policy for the container. + items: + description: ContainerResizePolicy represents resource resize policy for the container. + properties: + resourceName: + description: 'Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.' + type: string + restartPolicy: + description: Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired. + type: string + required: + - resourceName + - restartPolicy + type: object + type: array + x-kubernetes-list-type: atomic + resources: + description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + restartPolicy: + description: 'RestartPolicy defines the restart behavior of individual containers in a pod. 
This field may only be set for init containers, and the only allowed value is "Always". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod''s restart policy and the container type. Setting the RestartPolicy as "Always" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy "Always" will be shut down. This lifecycle differs from normal init containers and is often referred to as a "sidecar" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.' + type: string + securityContext: + description: 'SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. If seccomp options are provided at both the pod & container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is "Localhost". Must NOT be set for any other type. + type: string + type: + description: "type indicates which kind of seccomp profile will be applied. 
Valid options are: \n Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + properties: + port: + description: Port number of the gRPC service. Number must be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). \n If this is not specified, the default behavior is defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header. + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP port. + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. 
Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. 
+ type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + keepDroppedTargets: + description: "Per-scrape limit on the number of targets dropped by relabeling that will be kept in memory. 0 means no limit. \n It requires Prometheus >= v2.47.0." + format: int64 + type: integer + labelLimit: + description: Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.45.0 and newer. + format: int64 + type: integer + labelNameLengthLimit: + description: Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.45.0 and newer. + format: int64 + type: integer + labelValueLengthLimit: + description: Per-scrape limit on length of labels value that will be accepted for a sample. 
Only valid in Prometheus versions 2.45.0 and newer. + format: int64 + type: integer + listenLocal: + description: When true, the Prometheus server listens on the loopback address instead of the Pod IP's address. + type: boolean + logFormat: + description: Log format for Log level for Prometheus and the config-reloader sidecar. + enum: + - "" + - logfmt + - json + type: string + logLevel: + description: Log level for Prometheus and the config-reloader sidecar. + enum: + - "" + - debug + - info + - warn + - error + type: string + maximumStartupDurationSeconds: + description: Defines the maximum time that the `prometheus` container's startup probe will wait before being considered failed. The startup probe will return success after the WAL replay is complete. If set, the value should be greater than 60 (seconds). Otherwise it will be equal to 600 seconds (15 minutes). + format: int32 + minimum: 60 + type: integer + minReadySeconds: + description: "Minimum number of seconds for which a newly created Pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) \n This is an alpha field from kubernetes 1.22 until 1.24 which requires enabling the StatefulSetMinReadySeconds feature gate." + format: int32 + type: integer + nodeSelector: + additionalProperties: + type: string + description: Defines on which Nodes the Pods are scheduled. + type: object + overrideHonorLabels: + description: When true, Prometheus resolves label conflicts by renaming the labels in the scraped data to "exported_