From 596b3e340db483349934debe16bcf6ed2691a68a Mon Sep 17 00:00:00 2001 From: Enxebre Date: Fri, 11 Dec 2020 16:08:10 +0100 Subject: [PATCH] Add capa-controller-manager --- .gitignore | 1 + README.md | 3 +- api/v1alpha1/hosted_controlplane.go | 11 +- api/v1alpha1/openshiftcluster_types.go | 2 + api/v1alpha1/zz_generated.deepcopy.go | 2 + config/cluster-api/kustomization.yaml | 8 +- config/cluster-api/manager-clusterrole.yaml | 4 +- config/cluster-api/manager-deployment.yaml | 1 + config/example-cluster/cluster.yaml | 2 + config/example-cluster/kustomization.yaml | 2 +- ...hift.openshift.io_hostedcontrolplanes.yaml | 8 + ...rshift.openshift.io_openshiftclusters.yaml | 8 + .../assets/controlplane/hypershift/bindata.go | 94 ++++++++++ .../capi/capa-manager-clusterrole.yaml | 171 ++++++++++++++++++ .../capi/capa-manager-clusterrolebinding.yaml | 12 ++ .../capi/capa-manager-deployment.yaml | 51 ++++++ .../capi/capa-manager-serviceaccount.yaml | 4 + .../controllers/controlplane.go | 56 ++++-- .../externalinfracluster_controller.go | 2 +- .../controllers/nodepool_controller.go | 2 +- .../openshiftcluster_controller.go | 11 +- .../controlplane/hypershift/manifests.go | 10 + 22 files changed, 431 insertions(+), 34 deletions(-) create mode 100644 hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrole.yaml create mode 100644 hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrolebinding.yaml create mode 100644 hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-deployment.yaml create mode 100644 hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-serviceaccount.yaml diff --git a/.gitignore b/.gitignore index a5eed30e6c..3183f61d94 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,4 @@ tools/bin config/example-cluster/ssh-key config/example-cluster/pull-secret +config/example-cluster/aws-creds diff --git a/README.md b/README.md index 1ad0c94203..61ec290bdb 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,8 @@ $ make uninstall First, create the following files containing secrets used by the example cluster: - `config/example-cluster/pull-secret` a valid pull secret for image pulls. -- `config/example-cluster/ssh-key` an SSH public key for guest node access +- `config/example-cluster/ssh-key` an SSH public key for guest node access. +- `config/example-cluster/aws-creds` an [aws credentials file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html). 
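For reference, `aws-creds` is a standard AWS shared credentials file; a minimal sketch (both key values below are placeholders for your own credentials):

```
[default]
aws_access_key_id = <YOUR_ACCESS_KEY_ID>
aws_secret_access_key = <YOUR_SECRET_ACCESS_KEY>
```

The example-cluster kustomization packages this file into the `provider-creds` secret that `cluster.yaml` references through its `providerCreds` field.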
Install the example cluster: diff --git a/api/v1alpha1/hosted_controlplane.go b/api/v1alpha1/hosted_controlplane.go index caa965ff71..6cd6d79174 100644 --- a/api/v1alpha1/hosted_controlplane.go +++ b/api/v1alpha1/hosted_controlplane.go @@ -25,11 +25,12 @@ type HostedControlPlane struct { // HostedControlPlaneSpec defines the desired state of HostedControlPlane type HostedControlPlaneSpec struct { - ReleaseImage string `json:"releaseImage"` - PullSecret corev1.LocalObjectReference `json:"pullSecret"` - ServiceCIDR string `json:"serviceCIDR"` - PodCIDR string `json:"podCIDR"` - SSHKey corev1.LocalObjectReference `json:"sshKey"` + ReleaseImage string `json:"releaseImage"` + PullSecret corev1.LocalObjectReference `json:"pullSecret"` + ServiceCIDR string `json:"serviceCIDR"` + PodCIDR string `json:"podCIDR"` + SSHKey corev1.LocalObjectReference `json:"sshKey"` + ProviderCreds corev1.LocalObjectReference `json:"providerCreds"` } // HostedControlPlaneStatus defines the observed state of HostedControlPlane diff --git a/api/v1alpha1/openshiftcluster_types.go b/api/v1alpha1/openshiftcluster_types.go index b19949c399..242adf0488 100644 --- a/api/v1alpha1/openshiftcluster_types.go +++ b/api/v1alpha1/openshiftcluster_types.go @@ -39,6 +39,8 @@ type OpenShiftClusterSpec struct { SSHKey corev1.LocalObjectReference `json:"sshKey"` + ProviderCreds corev1.LocalObjectReference `json:"providerCreds"` + ServiceCIDR string `json:"serviceCIDR"` PodCIDR string `json:"podCIDR"` } diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 9343bd7c72..073b5c5f20 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -208,6 +208,7 @@ func (in *HostedControlPlaneSpec) DeepCopyInto(out *HostedControlPlaneSpec) { *out = *in out.PullSecret = in.PullSecret out.SSHKey = in.SSHKey + out.ProviderCreds = in.ProviderCreds } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostedControlPlaneSpec. @@ -427,6 +428,7 @@ func (in *OpenShiftClusterSpec) DeepCopyInto(out *OpenShiftClusterSpec) { out.Release = in.Release out.PullSecret = in.PullSecret out.SSHKey = in.SSHKey + out.ProviderCreds = in.ProviderCreds } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftClusterSpec. 
diff --git a/config/cluster-api/kustomization.yaml b/config/cluster-api/kustomization.yaml index 27cf253646..d56dcc782f 100644 --- a/config/cluster-api/kustomization.yaml +++ b/config/cluster-api/kustomization.yaml @@ -7,13 +7,13 @@ resources: - cluster.x-k8s.io_machines.yaml - cluster.x-k8s.io_machinedeployments.yaml - cluster.x-k8s.io_machinehealthchecks.yaml -- manager-serviceaccount.yaml -- manager-clusterrole.yaml -- manager-clusterrolebinding.yaml -- manager-deployment.yaml - infrastructure.cluster.x-k8s.io_awsclusters.yaml - infrastructure.cluster.x-k8s.io_awsmachinepools.yaml - infrastructure.cluster.x-k8s.io_awsmachines.yaml - infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml - infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml - infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml +- manager-serviceaccount.yaml +- manager-clusterrole.yaml +- manager-clusterrolebinding.yaml +- manager-deployment.yaml diff --git a/config/cluster-api/manager-clusterrole.yaml b/config/cluster-api/manager-clusterrole.yaml index f4acdbef3d..cc1d028be2 100644 --- a/config/cluster-api/manager-clusterrole.yaml +++ b/config/cluster-api/manager-clusterrole.yaml @@ -30,8 +30,8 @@ rules: resources: - hostedcontrolplanes - hostedcontrolplanes/status - - guestclusters - - guestclusters/status + - externalinfraclusters + - externalinfraclusters/status verbs: - '*' - apiGroups: diff --git a/config/cluster-api/manager-deployment.yaml b/config/cluster-api/manager-deployment.yaml index 266dc390a9..641648dbb6 100644 --- a/config/cluster-api/manager-deployment.yaml +++ b/config/cluster-api/manager-deployment.yaml @@ -22,4 +22,5 @@ spec: - /manager args: - --namespace=hypershift + - --alsologtostderr - --v=4 diff --git a/config/example-cluster/cluster.yaml b/config/example-cluster/cluster.yaml index d79202e52d..fc62bb2e3f 100644 --- a/config/example-cluster/cluster.yaml +++ b/config/example-cluster/cluster.yaml @@ -12,3 +12,5 @@ spec: name: pull-secret sshKey: name: ssh-key + providerCreds: + name: provider-creds \ No newline at end of file diff --git a/config/example-cluster/kustomization.yaml b/config/example-cluster/kustomization.yaml index b153927df9..3ee0cc58a9 100644 --- a/config/example-cluster/kustomization.yaml +++ b/config/example-cluster/kustomization.yaml @@ -20,7 +20,7 @@ secretGenerator: files: - id_rsa.pub=ssh-key type: Opaque -- name: aws-creds +- name: provider-creds options: disableNameSuffixHash: true files: diff --git a/config/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml b/config/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml index 796a73ce4d..f4a25883e9 100644 --- a/config/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml +++ b/config/hypershift-operator/hypershift.openshift.io_hostedcontrolplanes.yaml @@ -39,6 +39,13 @@ spec: properties: podCIDR: type: string + providerCreds: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object pullSecret: description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. 
properties: @@ -59,6 +66,7 @@ spec: type: object required: - podCIDR + - providerCreds - pullSecret - releaseImage - serviceCIDR diff --git a/config/hypershift-operator/hypershift.openshift.io_openshiftclusters.yaml b/config/hypershift-operator/hypershift.openshift.io_openshiftclusters.yaml index e77e663b4a..cce30fa06e 100644 --- a/config/hypershift-operator/hypershift.openshift.io_openshiftclusters.yaml +++ b/config/hypershift-operator/hypershift.openshift.io_openshiftclusters.yaml @@ -39,6 +39,13 @@ spec: type: integer podCIDR: type: string + providerCreds: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object pullSecret: description: PullSecret is a pull secret injected into the container runtime of guest workers. It should have an ".dockerconfigjson" key containing the pull secret JSON. properties: @@ -68,6 +75,7 @@ spec: required: - initialComputeReplicas - podCIDR + - providerCreds - pullSecret - release - serviceCIDR diff --git a/hypershift-operator/assets/controlplane/hypershift/bindata.go b/hypershift-operator/assets/controlplane/hypershift/bindata.go index 50ce5085a3..357d81c826 100644 --- a/hypershift-operator/assets/controlplane/hypershift/bindata.go +++ b/hypershift-operator/assets/controlplane/hypershift/bindata.go @@ -5,6 +5,10 @@ // hypershift-operator/assets/controlplane/hypershift/apiserver-haproxy/kube-apiserver-proxy.yaml (709B) // hypershift-operator/assets/controlplane/hypershift/apiserver-haproxy/setup-apiserver-ip.sh (206B) // hypershift-operator/assets/controlplane/hypershift/apiserver-haproxy/teardown-apiserver-ip.sh (168B) +// hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrole.yaml (2.963kB) +// hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrolebinding.yaml (303B) +// hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-deployment.yaml (1.335kB) +// hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-serviceaccount.yaml (77B) // hypershift-operator/assets/controlplane/hypershift/cluster-bootstrap/00000_namespaces-needed-for-monitoring.yaml (770B) // hypershift-operator/assets/controlplane/hypershift/cluster-bootstrap/cluster-config-v1-configmap.yaml (338B) // hypershift-operator/assets/controlplane/hypershift/cluster-bootstrap/cluster-dns-02-config.yaml (146B) @@ -302,6 +306,86 @@ func apiserverHaproxyTeardownApiserverIpSh() (*asset, error) { return a, nil } +var _capiCapaManagerClusterroleYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xdc\x94\xc1\x6e\xa3\x40\x0c\x86\xef\x3c\xc5\x28\x17\xa4\x48\x10\xed\x6d\xc5\x75\x0f\x7b\x5f\xad\x7a\x77\x06\x27\x8c\x32\xcc\x8c\x6c\x4f\x92\xf6\xe9\xab\x40\x94\xd2\x12\x54\xd2\x92\x36\x0d\xa7\xb1\xcd\x2f\x3e\xff\x63\x0c\xc1\x3c\x20\xb1\xf1\xae\x50\xb4\x04\x9d\x43\x94\xca\x93\x79\x02\x31\xde\xe5\x9b\xdf\x9c\x1b\xbf\xd8\xfe\x4a\x36\xc6\x95\x85\xfa\x63\x23\x0b\xd2\x3f\x6f\x31\xa9\x51\xa0\x04\x81\x22\x51\x4a\x13\x36\x82\xff\xa6\x46\x16\xa8\x43\xa1\x5c\xb4\x36\x51\xca\x41\x8d\x85\xd2\x10\x20\xab\xc1\xc1\x1a\x29\xa3\x83\x9a\xa2\x45\x3e\x48\x33\x05\xc1\xfc\x25\x1f\x43\x13\x1e\x9e\x4c\xcd\x66\xcd\x91\x90\x7d\x24\x8d\x9d\x0a\x6e\xd1\x09\x37\xe1\x16\x69\xd9\xa9\x34\x0c\x78\x0a\xd7\x28\xa7\xb3\x35\xfc\x12\x04\x10\x5d\x9d\xa2\x18\xca\xae\x6a\x77\x2c\x5e\x48\xc5\xa8\x09\x47\x61\x95\x68\xf1\xba\x94\xba\xbd\xa3\x7c\x9f\xb5\xb7\x37\xc4\x7c\x7c\x8f\x7b\x89\x05\x0b\x48\x3c\xdb\xcc\x20\xee\x24\x44\x35\xe8\xca\x38\xe4\x5e\x62\x7a\x22\xef\x84\xbc\x0d\x16\x1c\xe6\x63\xf1\x60\xc7\xed\x08\x97\x5d\x39\xbf\x57\x9f\x1c\x1e\xf7\x61\x34\xf3\xd1\xc1\xe0\xbd\xed\xd9\xda\x24\x27\xa7\x33\x6e\x45\xc0\x42\x51\x4b\xa4\x8b\xcc\x7d\x35\x90\xdf\xfd\x17\x7d\xbe\x8d\x91\xce\x0e\x20\x4e\x0c\xd5\x9b\x83\x9f\xeb\xef\x07\xa6\xf7\x4b\x3d\xbe\x13\x7f\x6f\xcd\xdb\x76\xb1\xde\xc7\x8a\x78\xd3\xcd\x2d\x3a\x7d\x57\x0b\xa3\xd7\xd1\x15\x1c\xaf\x1e\x03\x12\x57\x66\x25\xb9\x0f\xe8\xda\xd3\x30\x57\x3a\x4f\xcf\x7d\x3d\x9d\xa7\xcf\x01\x00\x00\xff\xff\xfd\xe2\xfa\x51\x93\x0b\x00\x00") + +func capiCapaManagerClusterroleYamlBytes() ([]byte, error) { + return bindataRead( + _capiCapaManagerClusterroleYaml, + "capi/capa-manager-clusterrole.yaml", + ) +} + +func capiCapaManagerClusterroleYaml() (*asset, error) { + bytes, err := capiCapaManagerClusterroleYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "capi/capa-manager-clusterrole.yaml", size: 2963, mode: os.FileMode(0644), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd3, 0xef, 0x4a, 0x71, 0xb7, 0x5d, 0xb, 0xf2, 0xfe, 0xd5, 0x6a, 0x40, 0xd1, 0xd7, 0x9a, 0x75, 0xd4, 0x91, 0xf2, 0xaf, 0x32, 0x5f, 0xe9, 0x8c, 0x6b, 0x9, 0x15, 0xde, 0x61, 0xfd, 0x5f, 0x4a}} + return a, nil +} + +var _capiCapaManagerClusterrolebindingYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x8d\xb1\x4a\x04\x41\x0c\x86\xfb\x79\x8a\xbc\xc0\x8e\xd8\xc9\x74\x6a\x61\x67\x71\x82\x7d\x36\x1b\xcf\x78\xb3\xc9\x90\xc9\x5c\xe1\x71\xef\x2e\x0b\x77\x82\x88\x76\x09\x7c\xff\xf7\x61\x93\x57\xf6\x2e\xa6\x05\x7c\x46\xca\x38\xe2\xdd\x5c\x3e\x31\xc4\x34\x1f\xee\x7a\x16\xbb\x39\xde\xa6\x83\xe8\x52\xe0\xb1\x8e\x1e\xec\x3b\xab\xfc\x20\xba\x88\xee\xd3\xca\x81\x0b\x06\x96\x04\xa0\xb8\x72\x01\xc2\x86\xd3\x8a\x8a\x7b\xf6\xc9\xad\xf2\x7c\x41\xb7\x7b\xc7\x6f\x1b\x89\x4d\x9e\xdc\x46\xfb\xa7\x9a\x00\x7e\x45\xff\x6e\xa4\x3e\xe6\x0f\xa6\xe8\x9b\x7d\xba\x2c\x5f\xd8\x8f\x42\x7c\x4f\x64\x43\x23\x01\xfc\x98\x93\x69\xb8\xd5\xca\x7e\x35\x7d\x13\xbd\x21\x71\x81\xd3\x09\xf2\xf3\xf5\x85\xf3\x39\x7d\x05\x00\x00\xff\xff\x19\xd2\x37\xd4\x2f\x01\x00\x00") + +func capiCapaManagerClusterrolebindingYamlBytes() ([]byte, error) { + return bindataRead( + _capiCapaManagerClusterrolebindingYaml, + "capi/capa-manager-clusterrolebinding.yaml", + ) +} + +func capiCapaManagerClusterrolebindingYaml() (*asset, error) { + bytes, err := capiCapaManagerClusterrolebindingYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "capi/capa-manager-clusterrolebinding.yaml", size: 303, mode: os.FileMode(0644), modTime: time.Unix(1, 0)} 
+ a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x15, 0xa9, 0x96, 0x70, 0xae, 0x9, 0x18, 0x78, 0x87, 0x84, 0xfb, 0x4, 0xac, 0x2d, 0x55, 0x64, 0x55, 0xf5, 0xab, 0x37, 0xf6, 0xbe, 0xa2, 0x5f, 0x7, 0xa7, 0x9c, 0x39, 0xb5, 0x22, 0xb3, 0x4b}} + return a, nil +} + +var _capiCapaManagerDeploymentYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x53\x4d\x6f\xdb\x30\x0c\xbd\xfb\x57\xe8\x0f\xb8\x59\x81\x5e\x26\x60\x87\xa0\xcd\xba\x02\x5d\x61\x34\xc5\x76\x2c\x58\x99\x8d\x85\x52\xa2\x46\xd1\x2e\xbc\x5f\x3f\xa8\x49\x0a\xc7\x59\xbb\x0f\x9d\x6c\x8a\xef\x3d\x3e\x8a\xac\xeb\xba\x82\xe4\xbf\xa1\x64\xcf\xd1\x1a\x48\x29\x2f\x86\xd3\xea\xc9\xc7\xd6\x9a\x0b\x4c\xc4\x63\xc0\xa8\x55\x40\x85\x16\x14\x6c\x65\x4c\x84\x80\xd6\x38\x48\x50\x3b\x8e\x2a\x4c\x84\x52\x07\x88\xb0\x41\xa9\x8c\x21\x78\x40\xca\x25\xd3\x98\x5d\x42\x9d\x08\xe2\x3b\xa0\x9c\xd0\x15\x40\x46\x42\xa7\x2c\x5b\x70\x00\x75\xdd\xf5\x84\xed\xaf\xf9\x8c\x11\x4c\xe4\x1d\x64\x6b\x4e\x2b\x63\x14\x43\x22\x50\xdc\xf1\x4e\xcc\x94\x43\x07\x12\xff\x20\x62\xcc\xbe\xf0\x97\x6f\x94\xc1\x3b\x5c\x3a\xc7\x7d\xd4\x9b\x3f\x74\x69\x2f\x04\x3e\xa2\x4c\xc4\x6b\x03\xb2\x99\xfc\x6f\x63\x75\x0d\x94\x99\x78\xa3\x9c\xb5\x45\x91\xa3\xfb\xe1\xd3\xd9\x24\xe6\x03\x6c\xd0\x9a\x1f\x3d\x8c\x27\x9e\x17\xdd\x98\x50\x72\xe7\x1f\x75\xe1\xa8\xcf\x8a\x52\x43\xf2\x75\x12\x1e\x7c\x5b\x7e\x9e\xb3\x0d\x50\xe2\x73\x8e\xa6\x27\x6a\x98\xbc\x1b\xad\x59\xd2\x33\x8c\x79\x92\xb1\x9d\x84\x43\x4f\xe5\x24\x16\x3d\xb2\xf0\x6a\xb6\x61\x51\x6b\x3e\x9e\x9d\x7d\x38\xc8\xd8\xd3\x75\x08\xa4\xdd\xcf\xd9\x5d\x12\x56\x76\x4c\xd6\xdc\x9d\x37\x93\x3b\x41\x68\x7d\xc4\x9c\x1b\xe1\x07\x3c\xd4\xec\x54\xd3\x25\xaa\x9d\x53\x81\x76\xd6\x2c\x0a\x72\x3c\x92\x79\xa9\xed\xb8\x04\xf2\x03\xfe\x97\xca\x1b\x6e\xde\x90\xc1\x38\xcc\xdb\xb6\x6d\xca\xf2\xfb\xfa\x7e\xfd\x65\x79\xbb\xba\xb8\x3f\xbf\x5d\x5d\xac\x6e\xee\xae\x96\xd7\xeb\xfb\xcf\x57\xd7\xab\x19\xf5\x00\xd4\x63\x11\xe6\x80\x8b\x13\x78\xce\x0b\x27\xd8\x62\x54\x0f\x34\x7d\xbb\x81\xa9\x0f\xf8\xb5\x8c\xea\xd1\x53\xed\x36\xfc\xb7\xb8\x72\x42\x41\x35\x3b\x83\x7b\x9d\x6a\xca\x7b\x30\xd0\xef\xd1\x65\x74\x32\x6f\xde\x36\xb6\x5d\xa0\xd7\x11\x2d\xf0\x3d\x50\x51\x82\x8f\xa0\x9e\xe3\xa5\x80\xc3\x06\xc5\x73\xbb\x46\xc7\xb1\x2d\x1b\xbf\x1f\x2c\x65\x42\x79\x49\x3b\xa8\x07\x1f\x1f\xd1\xa9\x35\x37\xbc\x76\x1d\xb6\x3d\xe1\x44\xfe\x09\x47\x6b\x22\xb7\x58\x0b\x13\x9e\x3c\xf5\x0f\x28\x11\x15\x73\xd9\xa3\xdd\x8e\xfc\x0a\x00\x00\xff\xff\xd3\xe4\x2f\x44\x37\x05\x00\x00") + +func capiCapaManagerDeploymentYamlBytes() ([]byte, error) { + return bindataRead( + _capiCapaManagerDeploymentYaml, + "capi/capa-manager-deployment.yaml", + ) +} + +func capiCapaManagerDeploymentYaml() (*asset, error) { + bytes, err := capiCapaManagerDeploymentYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "capi/capa-manager-deployment.yaml", size: 1335, mode: os.FileMode(0644), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x1c, 0x2d, 0xff, 0x4d, 0x81, 0xc9, 0xa, 0x62, 0xd4, 0x45, 0xfd, 0x3a, 0x28, 0x94, 0x51, 0x99, 0xbd, 0xcf, 0x70, 0xa8, 0x4c, 0xfe, 0x1b, 0xa2, 0xa, 0x19, 0x9, 0xe1, 0x5c, 0x74, 0x88, 0x1e}} + return a, nil +} + +var _capiCapaManagerServiceaccountYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x04\xc0\xb1\x0d\x03\x31\x08\x05\xd0\xde\x53\xb0\x80\x8b\xb4\x74\x99\x21\x52\xfa\x2f\x8c\x22\x14\x1b\x2c\x8e\xf3\xfc\xf7\xb0\xed\xab\x79\x59\x38\xd3\x79\xb5\xbf\xf9\x60\xfa\x68\x1e\x13\x7d\x8b\xc4\xed\xd5\x96\x16\x06\x0a\xdc\x88\x1c\x4b\x99\x04\x1b\x5d\xc2\x2b\x63\x4e\xcd\xbe\xe0\xf8\x69\x3e\x01\x00\x00\xff\xff\xfb\x85\xa6\xe2\x4d\x00\x00\x00") + +func capiCapaManagerServiceaccountYamlBytes() ([]byte, error) { + return bindataRead( + _capiCapaManagerServiceaccountYaml, + "capi/capa-manager-serviceaccount.yaml", + ) +} + +func capiCapaManagerServiceaccountYaml() (*asset, error) { + bytes, err := capiCapaManagerServiceaccountYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "capi/capa-manager-serviceaccount.yaml", size: 77, mode: os.FileMode(0644), modTime: time.Unix(1, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb6, 0x54, 0x7e, 0x1a, 0x44, 0x8f, 0xaf, 0xe, 0x93, 0x53, 0x41, 0x28, 0x8e, 0xb2, 0xf2, 0x46, 0x9a, 0x91, 0x50, 0xa4, 0x97, 0x76, 0x96, 0xee, 0x60, 0xea, 0x68, 0x5d, 0x19, 0x30, 0x15, 0xea}} + return a, nil +} + var _clusterBootstrap00000_namespacesNeededForMonitoringYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xac\x92\x31\x6e\xc3\x30\x0c\x45\x77\x9d\x82\xc8\xae\x14\x5d\x75\x88\x8e\xdd\x19\x99\xb5\x09\xcb\xa4\x40\x52\x29\x7a\xfb\x22\x43\xd1\x14\x9d\x0c\x7b\xff\x78\x7a\x7a\x60\xce\x39\x61\xe7\x77\x32\x67\x95\x02\xf7\xd7\xb4\xb2\x4c\x05\xde\x70\x23\xef\x58\x29\x6d\x14\x38\x61\x60\x49\x00\x82\x1b\x15\xd0\x4e\xe2\x0b\x7f\x44\xc6\xce\x4e\x76\x27\x4b\x07\x41\x55\x25\x4c\x5b\x23\xcb\x1b\x0a\xce\xc7\x89\xeb\xb8\xd1\x93\x1f\x40\xc3\x1b\x35\x7f\x4c\xe1\x77\x76\x65\x7d\xa9\x6d\x78\x3c\x1e\x56\xe1\x50\x63\x99\x0b\x5c\xc2\x06\x5d\x4e\x71\xf0\xba\xd0\x34\xda\x49\x3f\x3a\x3f\x14\x45\x9d\xf6\x32\x9e\x5b\x0a\xc5\xa7\xda\x7a\xfd\xd3\xb4\x6b\xe3\xfa\x95\x67\xd3\xd1\x0b\xb0\xcc\x46\xee\xbb\xd2\xff\x17\xfd\xa1\x1c\xbd\xd9\x11\x0b\x49\x70\xc5\x60\x95\xf4\x1d\x00\x00\xff\xff\x97\xb6\x41\xc5\x02\x03\x00\x00") func clusterBootstrap00000_namespacesNeededForMonitoringYamlBytes() ([]byte, error) { @@ -2978,6 +3062,10 @@ var _bindata = map[string]func() (*asset, error){ "apiserver-haproxy/kube-apiserver-proxy.yaml": apiserverHaproxyKubeApiserverProxyYaml, "apiserver-haproxy/setup-apiserver-ip.sh": apiserverHaproxySetupApiserverIpSh, "apiserver-haproxy/teardown-apiserver-ip.sh": apiserverHaproxyTeardownApiserverIpSh, + "capi/capa-manager-clusterrole.yaml": capiCapaManagerClusterroleYaml, + "capi/capa-manager-clusterrolebinding.yaml": capiCapaManagerClusterrolebindingYaml, + "capi/capa-manager-deployment.yaml": capiCapaManagerDeploymentYaml, + "capi/capa-manager-serviceaccount.yaml": capiCapaManagerServiceaccountYaml, "cluster-bootstrap/00000_namespaces-needed-for-monitoring.yaml": clusterBootstrap00000_namespacesNeededForMonitoringYaml, "cluster-bootstrap/cluster-config-v1-configmap.yaml": clusterBootstrapClusterConfigV1ConfigmapYaml, "cluster-bootstrap/cluster-dns-02-config.yaml": clusterBootstrapClusterDns02ConfigYaml, @@ -3160,6 +3248,12 @@ var _bintree = &bintree{nil, map[string]*bintree{ "setup-apiserver-ip.sh": {apiserverHaproxySetupApiserverIpSh, map[string]*bintree{}}, "teardown-apiserver-ip.sh": {apiserverHaproxyTeardownApiserverIpSh, map[string]*bintree{}}, }}, + "capi": {nil, map[string]*bintree{ + "capa-manager-clusterrole.yaml": {capiCapaManagerClusterroleYaml, map[string]*bintree{}}, + "capa-manager-clusterrolebinding.yaml": {capiCapaManagerClusterrolebindingYaml, 
map[string]*bintree{}}, + "capa-manager-deployment.yaml": {capiCapaManagerDeploymentYaml, map[string]*bintree{}}, + "capa-manager-serviceaccount.yaml": {capiCapaManagerServiceaccountYaml, map[string]*bintree{}}, + }}, "cluster-bootstrap": {nil, map[string]*bintree{ "00000_namespaces-needed-for-monitoring.yaml": {clusterBootstrap00000_namespacesNeededForMonitoringYaml, map[string]*bintree{}}, "cluster-config-v1-configmap.yaml": {clusterBootstrapClusterConfigV1ConfigmapYaml, map[string]*bintree{}}, diff --git a/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrole.yaml b/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrole.yaml new file mode 100644 index 0000000000..7244efd344 --- /dev/null +++ b/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrole.yaml @@ -0,0 +1,171 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: capa-manager-role +rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - cluster.x-k8s.io + resources: + - clusters + - clusters/status + verbs: + - get + - list + - watch + - apiGroups: + - cluster.x-k8s.io + resources: + - machines + - machines/status + verbs: + - get + - list + - watch + - apiGroups: + - controlplane.cluster.x-k8s.io + resources: + - awsmanagedcontrolplanes + - awsmanagedcontrolplanes/status + verbs: + - get + - list + - watch + - apiGroups: + - exp.cluster.x-k8s.io + resources: + - machinepools + - machinepools/status + verbs: + - get + - list + - watch + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsclusters/status + verbs: + - get + - patch + - update + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmachinepools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmachinepools/status + verbs: + - get + - patch + - update + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmachines + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmachines/status + verbs: + - get + - patch + - update + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedclusters/status + verbs: + - get + - patch + - update + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedmachinepools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmanagedmachinepools/status + verbs: + - get + - patch + - update + - apiGroups: + - hypershift.openshift.io + resources: + - '*' + verbs: + - '*' \ No newline at end of file diff --git a/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrolebinding.yaml b/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrolebinding.yaml new file mode 
100644 index 0000000000..c7e0c16ce7 --- /dev/null +++ b/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: capa-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: capa-manager-role +subjects: + - kind: ServiceAccount + name: capa-controller-manager + namespace: {{ .Namespace }} diff --git a/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-deployment.yaml b/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-deployment.yaml new file mode 100644 index 0000000000..cdc01c2622 --- /dev/null +++ b/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-deployment.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: capa-controller-manager + labels: + control-plane: capa-controller-manager +spec: + selector: + matchLabels: + control-plane: capa-controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: capa-controller-manager + spec: + serviceAccountName: capa-controller-manager + containers: + - args: + - --alsologtostderr + - --v=4 + image: quay.io/hypershift/cluster-api-provider-aws:master + imagePullPolicy: Always + name: manager + ports: + - containerPort: 9440 + name: healthz + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + env: + - name: AWS_SHARED_CREDENTIALS_FILE + value: /home/.aws/credentials + volumeMounts: + - name: credentials + mountPath: /home/.aws + volumes: + - name: credentials + secret: + secretName: provider-creds + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master diff --git a/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-serviceaccount.yaml b/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-serviceaccount.yaml new file mode 100644 index 0000000000..4637baaae4 --- /dev/null +++ b/hypershift-operator/assets/controlplane/hypershift/capi/capa-manager-serviceaccount.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: capa-controller-manager \ No newline at end of file diff --git a/hypershift-operator/controllers/controlplane.go b/hypershift-operator/controllers/controlplane.go index 15fcc11407..cb6428731f 100644 --- a/hypershift-operator/controllers/controlplane.go +++ b/hypershift-operator/controllers/controlplane.go @@ -12,6 +12,7 @@ import ( "math/big" "math/rand" "os" + "strings" "sigs.k8s.io/cluster-api/util" @@ -68,6 +69,17 @@ func (r *HostedControlPlaneReconciler) ensureControlPlane(ctx context.Context, h if !hasPullSecretData { return fmt.Errorf("pull secret %s is missing the .dockerconfigjson key", hcp.Spec.PullSecret.Name) } + + var providerCredsSecret corev1.Secret + err = r.Client.Get(ctx, client.ObjectKey{Namespace: hcp.Namespace, Name: hcp.Spec.ProviderCreds.Name}, &providerCredsSecret) + if err != nil { + return fmt.Errorf("failed to get provider creds %s: %w", hcp.Spec.ProviderCreds.Name, err) + } + providerCredsData, hasProviderCredsData := providerCredsSecret.Data["credentials"] + if !hasProviderCredsData { + return fmt.Errorf("provider credentials %s is missing the credentials key", hcp.Spec.ProviderCreds.Name) + } + version, err := semver.Parse(releaseImage.Version()) if err != nil { return fmt.Errorf("cannot parse release version (%s): %v",
releaseImage.Version(), err) @@ -174,17 +186,17 @@ func (r *HostedControlPlaneReconciler) ensureControlPlane(ctx context.Context, h } // Create oauth branding manifest because it cannot be applied - //manifestBytes := manifests[oauthBrandingManifest] - //manifestObj := &unstructured.Unstructured{} - //if err := yaml.NewYAMLOrJSONDecoder(strings.NewReader(string(manifestBytes)), 100).Decode(manifestObj); err != nil { - // return fmt.Errorf("failed to decode manifest %s: %w", oauthBrandingManifest, err) - //} - //manifestObj.SetNamespace(name) - //if err = r.Create(context.TODO(), manifestObj); err != nil { - // if !apierrors.IsAlreadyExists(err) { - // return fmt.Errorf("failed to apply manifest %s: %w", oauthBrandingManifest, err) - // } - //} + manifestBytes := manifests[oauthBrandingManifest] + manifestObj := &unstructured.Unstructured{} + if err := yaml.NewYAMLOrJSONDecoder(strings.NewReader(string(manifestBytes)), 100).Decode(manifestObj); err != nil { + return fmt.Errorf("failed to decode manifest %s: %w", oauthBrandingManifest, err) + } + manifestObj.SetNamespace(name) + if err = r.Create(context.TODO(), manifestObj); err != nil { + if !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to apply manifest %s: %w", oauthBrandingManifest, err) + } + } // Use server side apply for manifestss applyErrors := []error{} @@ -242,15 +254,15 @@ func (r *HostedControlPlaneReconciler) ensureControlPlane(ctx context.Context, h if err != nil { return fmt.Errorf("failed to create kubeconfig secret manifest for management cluster: %w", err) } - if err := r.Create(ctx, kubeconfigSecret); err != nil && !apierrors.IsAlreadyExists(err) { - return fmt.Errorf("failed to generate kubeconfigSecret: %w", err) - } kubeconfigSecret.OwnerReferences = util.EnsureOwnerRef(kubeconfigSecret.OwnerReferences, metav1.OwnerReference{ APIVersion: hyperv1.GroupVersion.String(), Kind: "HostedControlPlane", Name: hcp.GetName(), UID: hcp.UID, }) + if err := r.Create(ctx, kubeconfigSecret); err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to generate kubeconfigSecret: %w", err) + } targetPullSecret, err := generateTargetPullSecret(r.Scheme(), pullSecretData, name) if err != nil { @@ -260,6 +272,13 @@ func (r *HostedControlPlaneReconciler) ensureControlPlane(ctx context.Context, h return fmt.Errorf("failed to generate targetPullSecret: %v", err) } + targetProviderCredsSecret, err := generateTargetProviderCredsSecret(providerCredsData, name) + if err != nil { + return fmt.Errorf("failed to create providerCreds secret manifest for target cluster: %w", err) + } + if err := r.Create(ctx, targetProviderCredsSecret); err != nil && !apierrors.IsAlreadyExists(err) { + return fmt.Errorf("failed to generate providerCreds secret: %v", err) + } log.Infof("Cluster API URL: %s", fmt.Sprintf("https://%s:%d", infraStatus.APIAddress, APIServerPort)) log.Infof("Kubeconfig is available in secret %q in the %s namespace", fmt.Sprintf("%s-kubeconfig", name), hcp.GetNamespace()) log.Infof("Console URL: %s", fmt.Sprintf("https://console-openshift-console.%s", params.IngressSubdomain)) @@ -268,6 +287,15 @@ func (r *HostedControlPlaneReconciler) ensureControlPlane(ctx context.Context, h return nil } +func generateTargetProviderCredsSecret(data []byte, namespace string) (*corev1.Secret, error) { + secret := &corev1.Secret{} + secret.Name = "provider-creds" + secret.Namespace = namespace + secret.Data = map[string][]byte{"credentials": data} + secret.Type = corev1.SecretTypeOpaque + return secret, nil +} + func 
generateTargetPullSecret(scheme *runtime.Scheme, data []byte, namespace string) (*corev1.ConfigMap, error) { secret := &corev1.Secret{} secret.Name = "pull-secret" diff --git a/hypershift-operator/controllers/externalinfracluster_controller.go b/hypershift-operator/controllers/externalinfracluster_controller.go index 238dc6fc80..5e0b80b26b 100644 --- a/hypershift-operator/controllers/externalinfracluster_controller.go +++ b/hypershift-operator/controllers/externalinfracluster_controller.go @@ -50,7 +50,7 @@ func (r *ExternalInfraClusterReconciler) SetupWithManager(mgr ctrl.Manager) erro } r.Infra = &infra - r.recorder = mgr.GetEventRecorderFor("guest-cluster-controller") + r.recorder = mgr.GetEventRecorderFor("external-infra-controller") return nil } diff --git a/hypershift-operator/controllers/nodepool_controller.go b/hypershift-operator/controllers/nodepool_controller.go index fd78e07c86..d7a4199bf5 100644 --- a/hypershift-operator/controllers/nodepool_controller.go +++ b/hypershift-operator/controllers/nodepool_controller.go @@ -52,7 +52,7 @@ func (r *NodePoolReconciler) SetupWithManager(mgr ctrl.Manager) error { } r.Infra = &infra - r.recorder = mgr.GetEventRecorderFor("guest-cluster-controller") + r.recorder = mgr.GetEventRecorderFor("nodepool-controller") return nil } diff --git a/hypershift-operator/controllers/openshiftcluster_controller.go b/hypershift-operator/controllers/openshiftcluster_controller.go index 4116f2bd19..8ea3f4fbc2 100644 --- a/hypershift-operator/controllers/openshiftcluster_controller.go +++ b/hypershift-operator/controllers/openshiftcluster_controller.go @@ -132,11 +132,12 @@ func (r *OpenShiftClusterReconciler) Reconcile(ctx context.Context, req ctrl.Req Name: ocluster.GetName(), }, Spec: hyperv1.HostedControlPlaneSpec{ - PullSecret: ocluster.Spec.PullSecret, - ServiceCIDR: ocluster.Spec.ServiceCIDR, - PodCIDR: ocluster.Spec.PodCIDR, - SSHKey: ocluster.Spec.SSHKey, - ReleaseImage: ocluster.Spec.Release.Image, + ProviderCreds: ocluster.Spec.ProviderCreds, + PullSecret: ocluster.Spec.PullSecret, + ServiceCIDR: ocluster.Spec.ServiceCIDR, + PodCIDR: ocluster.Spec.PodCIDR, + SSHKey: ocluster.Spec.SSHKey, + ReleaseImage: ocluster.Spec.Release.Image, }, } ExternalInfraCluster := &hyperv1.ExternalInfraCluster{ diff --git a/hypershift-operator/render/controlplane/hypershift/manifests.go b/hypershift-operator/render/controlplane/hypershift/manifests.go index 13fb8aad0b..48567850fa 100644 --- a/hypershift-operator/render/controlplane/hypershift/manifests.go +++ b/hypershift-operator/render/controlplane/hypershift/manifests.go @@ -74,6 +74,7 @@ func (c *clusterManifestContext) setupManifests() { c.routerProxy() c.machineConfigServer() c.ignitionConfigs() + c.capi() } func (c *clusterManifestContext) serviceAdminKubeconfig() { @@ -149,6 +150,15 @@ func (c *clusterManifestContext) kubeAPIServer() { ) } +func (c *clusterManifestContext) capi() { + c.addManifestFiles( + "capi/capa-manager-serviceaccount.yaml", + "capi/capa-manager-clusterrole.yaml", + "capi/capa-manager-clusterrolebinding.yaml", + "capi/capa-manager-deployment.yaml", + ) +} + func (c *clusterManifestContext) kubeControllerManager() { c.addManifestFiles( "kube-controller-manager/kube-controller-manager-deployment.yaml",
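To summarize the credential flow this patch wires up: the `providerCreds` reference on the OpenShiftCluster is copied onto the HostedControlPlane, `ensureControlPlane` reads the `credentials` key from the referenced secret, and `generateTargetProviderCredsSecret` re-creates that data as a secret named `provider-creds` in the control plane namespace. A sketch of the resulting object, assuming that namespace is the same one the CAPI/CAPA manifests are rendered into (the namespace value below is a placeholder):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: provider-creds
  namespace: <control-plane-namespace>  # placeholder; the namespace passed to generateTargetProviderCredsSecret
type: Opaque
data:
  credentials: <base64-encoded AWS shared credentials file>
```

The capa-manager deployment mounts a secret of that name at `/home/.aws` and sets `AWS_SHARED_CREDENTIALS_FILE=/home/.aws/credentials`, so the AWS provider picks up the credentials without further configuration.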