Skip to content

Commit

Permalink
Merge pull request #5188 from muraee/cpo-v2-cleanup
Browse files Browse the repository at this point in the history
NO-JIRA: CPO V2 cleanup
  • Loading branch information
openshift-merge-bot[bot] authored Nov 29, 2024
2 parents 0d9e89f + 662b7e0 commit d5762c5
Show file tree
Hide file tree
Showing 92 changed files with 565 additions and 440 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -1672,8 +1672,11 @@ func TestControlPlaneComponents(t *testing.T) {
Build()
cpContext.Client = fakeClient

if err := component.Reconcile(cpContext); err != nil {
t.Fatalf("failed to reconcile component %s: %v", component.Name(), err)
// Reconcile multiple times to make sure multiple runs don't produce different results.
for i := 0; i < 2; i++ {
if err := component.Reconcile(cpContext); err != nil {
t.Fatalf("failed to reconcile component %s: %v", component.Name(), err)
}
}

var deployments appsv1.DeploymentList
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,16 @@ metadata:
uid: ""
resourceVersion: "1"
spec:
replicas: 1
replicas: 2
revisionHistoryLimit: 2
selector:
matchLabels:
app: cloud-controller-manager
strategy: {}
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
template:
metadata:
annotations:
Expand Down Expand Up @@ -57,6 +61,22 @@ spec:
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: kubernetes.io/hostname
weight: 100
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: cloud-controller-manager
hypershift.openshift.io/control-plane-component: cloud-controller-manager-kubevirt
hypershift.openshift.io/hosted-control-plane: hcp-namespace
hypershift.openshift.io/need-management-kas-access: "true"
topologyKey: topology.kubernetes.io/zone
- labelSelector:
matchLabels:
app: cloud-controller-manager
hypershift.openshift.io/control-plane-component: cloud-controller-manager-kubevirt
hypershift.openshift.io/hosted-control-plane: hcp-namespace
hypershift.openshift.io/need-management-kas-access: "true"
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- args:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ spec:
whenDeleted: Retain
whenScaled: Retain
podManagementPolicy: Parallel
replicas: 1
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
Expand Down Expand Up @@ -59,6 +59,22 @@ spec:
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: kubernetes.io/hostname
weight: 100
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: etcd
hypershift.openshift.io/control-plane-component: etcd
hypershift.openshift.io/hosted-control-plane: hcp-namespace
hypershift.openshift.io/need-management-kas-access: "true"
topologyKey: topology.kubernetes.io/zone
- labelSelector:
matchLabels:
app: etcd
hypershift.openshift.io/control-plane-component: etcd
hypershift.openshift.io/hosted-control-plane: hcp-namespace
hypershift.openshift.io/need-management-kas-access: "true"
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: true
containers:
- command:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -9,12 +9,12 @@ spec: {}
status:
conditions:
- lastTransitionTime: null
message: 'etcd StatefulSet is not available: 0/1 replicas ready'
message: 'etcd StatefulSet is not available: 0/3 replicas ready'
reason: WaitingForAvailable
status: "False"
type: Available
- lastTransitionTime: null
message: 'etcd StatefulSet progressing: 0/1 replicas ready'
message: 'etcd StatefulSet progressing: 0/3 replicas ready'
reason: WaitingForAvailable
status: "True"
type: Progressing
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@ metadata:
uid: ""
resourceVersion: "1"
spec:
replicas: 1
replicas: 3
revisionHistoryLimit: 2
selector:
matchLabels:
app: kube-apiserver
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
Expand Down Expand Up @@ -63,6 +63,22 @@ spec:
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: kubernetes.io/hostname
weight: 100
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: kube-apiserver
hypershift.openshift.io/control-plane-component: kube-apiserver
hypershift.openshift.io/hosted-control-plane: hcp-namespace
hypershift.openshift.io/request-serving-component: "true"
topologyKey: topology.kubernetes.io/zone
- labelSelector:
matchLabels:
app: kube-apiserver
hypershift.openshift.io/control-plane-component: kube-apiserver
hypershift.openshift.io/hosted-control-plane: hcp-namespace
hypershift.openshift.io/request-serving-component: "true"
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: false
containers:
- args:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,14 @@ metadata:
uid: ""
resourceVersion: "1"
spec:
replicas: 1
replicas: 3
revisionHistoryLimit: 2
selector:
matchLabels:
app: openshift-apiserver
strategy:
rollingUpdate:
maxSurge: 3
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
Expand Down Expand Up @@ -62,6 +62,20 @@ spec:
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: kubernetes.io/hostname
weight: 100
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: openshift-apiserver
hypershift.openshift.io/control-plane-component: openshift-apiserver
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: topology.kubernetes.io/zone
- labelSelector:
matchLabels:
app: openshift-apiserver
hypershift.openshift.io/control-plane-component: openshift-apiserver
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: false
containers:
- args:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,14 +15,14 @@ metadata:
uid: ""
resourceVersion: "1"
spec:
replicas: 1
replicas: 3
revisionHistoryLimit: 2
selector:
matchLabels:
app: openshift-oauth-apiserver
strategy:
rollingUpdate:
maxSurge: 3
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
Expand Down Expand Up @@ -62,6 +62,20 @@ spec:
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: kubernetes.io/hostname
weight: 100
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: openshift-oauth-apiserver
hypershift.openshift.io/control-plane-component: openshift-oauth-apiserver
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: topology.kubernetes.io/zone
- labelSelector:
matchLabels:
app: openshift-oauth-apiserver
hypershift.openshift.io/control-plane-component: openshift-oauth-apiserver
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: false
containers:
- args:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@ metadata:
uid: ""
resourceVersion: "1"
spec:
replicas: 1
replicas: 3
revisionHistoryLimit: 2
selector:
matchLabels:
app: private-router
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
Expand Down Expand Up @@ -62,6 +62,22 @@ spec:
hypershift.openshift.io/hosted-control-plane: hcp-namespace
topologyKey: kubernetes.io/hostname
weight: 100
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: private-router
hypershift.openshift.io/control-plane-component: router
hypershift.openshift.io/hosted-control-plane: hcp-namespace
hypershift.openshift.io/request-serving-component: "true"
topologyKey: topology.kubernetes.io/zone
- labelSelector:
matchLabels:
app: private-router
hypershift.openshift.io/control-plane-component: router
hypershift.openshift.io/hosted-control-plane: hcp-namespace
hypershift.openshift.io/request-serving-component: "true"
topologyKey: kubernetes.io/hostname
automountServiceAccountToken: false
containers:
- args:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ kind: Deployment
metadata:
name: cluster-policy-controller
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ kind: Deployment
metadata:
name: kube-scheduler
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@ kind: Deployment
metadata:
name: openshift-controller-manager
spec:
replicas: 1
revisionHistoryLimit: 2
selector:
matchLabels:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ metadata:
name: openshift-route-controller-manager
namespace: HCP_NAMESPACE
spec:
replicas: 1
selector:
matchLabels:
app: openshift-route-controller-manager
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,34 +7,19 @@ import (
"github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
component "github.com/openshift/hypershift/support/controlplane-component"
"github.com/openshift/hypershift/support/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
)

const (
	// ComponentName is the name under which the cluster-autoscaler
	// control-plane component is registered and reconciled.
	ComponentName = "cluster-autoscaler"

	// ImageStreamAutoscalerImage is the image name used to resolve the
	// cluster-autoscaler container image.
	ImageStreamAutoscalerImage = "cluster-autoscaler"

	// kubeconfigVolumeName names the deployment volume whose secret is
	// rewritten to the CAPI kubeconfig secret during adaptation.
	kubeconfigVolumeName = "kubeconfig"
)

// Compile-time assertion that Autoscaler satisfies component.ComponentOptions.
var _ component.ComponentOptions = &Autoscaler{}

// Autoscaler provides the component options for the cluster-autoscaler
// control-plane component. It carries no state.
type Autoscaler struct {
}

func NewComponent() component.ControlPlaneComponent {
return component.NewDeploymentComponent(ComponentName, &Autoscaler{}).
WithAdaptFunction(AdaptDeployment).
WithPredicate(Predicate).
InjectAvailabilityProberContainer(util.AvailabilityProberOpts{}).
Build()
}

// IsRequestServing implements controlplanecomponent.ComponentOptions.
func (a *Autoscaler) IsRequestServing() bool {
return false
Expand All @@ -50,7 +35,15 @@ func (a *Autoscaler) NeedsManagementKASAccess() bool {
return true
}

func Predicate(cpContext component.ControlPlaneContext) (bool, error) {
// NewComponent constructs the cluster-autoscaler control-plane component:
// a Deployment-backed component whose manifest is customized by
// adaptDeployment, whose reconciliation is gated by predicate, and which
// has an availability-prober container injected.
func NewComponent() component.ControlPlaneComponent {
	return component.NewDeploymentComponent(ComponentName, &Autoscaler{}).
		WithAdaptFunction(adaptDeployment).
		WithPredicate(predicate).
		InjectAvailabilityProberContainer(util.AvailabilityProberOpts{}).
		Build()
}

func predicate(cpContext component.WorkloadContext) (bool, error) {
hcp := cpContext.HCP

// Disable cluster-autoscaler component if DisableMachineManagement label is set.
Expand All @@ -73,39 +66,3 @@ func Predicate(cpContext component.ControlPlaneContext) (bool, error) {

return true, nil
}

// AdaptDeployment customizes the cluster-autoscaler Deployment for the given
// hosted control plane: it translates the HCP autoscaling options into
// container arguments, points the kubeconfig volume at the CAPI kubeconfig
// secret, and sets the replica count (0 when the autoscaler is disabled via
// annotation, otherwise 1). It always returns nil.
func AdaptDeployment(cpContext component.ControlPlaneContext, deployment *appsv1.Deployment) error {
	hcp := cpContext.HCP

	// Map the optional HostedControlPlane autoscaling knobs onto
	// cluster-autoscaler CLI flags; unset options add no flag.
	util.UpdateContainer(ComponentName, deployment.Spec.Template.Spec.Containers, func(c *corev1.Container) {
		// TODO if the options for the cluster autoscaler continue to grow, we should take inspiration
		// from the cluster-autoscaler-operator and create some utility functions for these assignments.
		options := hcp.Spec.Autoscaling
		if options.MaxNodesTotal != nil {
			c.Args = append(c.Args, fmt.Sprintf("--max-nodes-total=%d", *options.MaxNodesTotal))
		}

		if options.MaxPodGracePeriod != nil {
			c.Args = append(c.Args, fmt.Sprintf("--max-graceful-termination-sec=%d", *options.MaxPodGracePeriod))
		}

		if options.MaxNodeProvisionTime != "" {
			c.Args = append(c.Args, fmt.Sprintf("--max-node-provision-time=%s", options.MaxNodeProvisionTime))
		}

		if options.PodPriorityThreshold != nil {
			c.Args = append(c.Args, fmt.Sprintf("--expendable-pods-priority-cutoff=%d", *options.PodPriorityThreshold))
		}
	})

	// Rewrite the kubeconfig volume to mount the KAS service CAPI kubeconfig
	// secret for this control plane's namespace and infra ID.
	util.UpdateVolume(kubeconfigVolumeName, deployment.Spec.Template.Spec.Volumes, func(v *corev1.Volume) {
		v.Secret.SecretName = manifests.KASServiceCAPIKubeconfigSecret(hcp.Namespace, hcp.Spec.InfraID).Name
	})

	// Run a single replica by default; scale to zero when the autoscaler is
	// explicitly disabled on the HostedControlPlane via annotation.
	deployment.Spec.Replicas = ptr.To[int32](1)
	if _, exists := hcp.Annotations[hyperv1.DisableClusterAutoscalerAnnotation]; exists {
		deployment.Spec.Replicas = ptr.To[int32](0)
	}

	return nil
}
Loading

0 comments on commit d5762c5

Please sign in to comment.