Let the HO manage the Karpenter Operator Deployment
The Karpenter Operator Deployment is fully OCP version agnostic: it is applied by the HO, and the binary lives within the HO image.
enxebre committed Dec 17, 2024
1 parent e949256 commit 2c4aee6
Showing 4 changed files with 155 additions and 0 deletions.
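
This commit wires a call to karpenteroperatormanifest.ReconcileKarpenterOperator into the HostedCluster reconcile loop and passes it the HO image, since the karpenter-operator binary ships inside that image. The actual manifests live in the karpenter-operator/manifests package and are not part of this diff. Purely as a hedged sketch, a Deployment rendered from the HO image could look roughly like the following; the object name, labels, and container command are assumptions, not the commit's real manifests:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// exampleKarpenterOperatorDeployment sketches a Deployment for the karpenter
// operator rendered from the HO image, since the binary ships inside that image.
// The object name, labels, and container command are assumptions for illustration.
func exampleKarpenterOperatorDeployment(namespace, hypershiftOperatorImage string) *appsv1.Deployment {
	labels := map[string]string{"app": "karpenter-operator"} // assumed label
	return &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "karpenter-operator", Namespace: namespace},
		Spec: appsv1.DeploymentSpec{
			Replicas: ptr.To(int32(1)),
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:    "karpenter-operator",
						Image:   hypershiftOperatorImage,                  // the binary lives within the HO image
						Command: []string{"/usr/bin/karpenter-operator"}, // assumed entrypoint path
					}},
				},
			},
		},
	}
}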
@@ -5335,6 +5335,17 @@ func createAWSDefaultSecurityGroup(ctx context.Context, ec2Client ec2iface.EC2AP
			Value: awssdk.String(awsSecurityGroupName(infraID)),
		})
	}

	// Tag the default worker security group so Karpenter can discover it by the "karpenter.sh/discovery" tag.
	if hcp.Spec.AutoNode != nil && hcp.Spec.AutoNode.Provisioner.Name == hyperv1.ProvisionerKarpeneter &&
		hcp.Spec.AutoNode.Provisioner.Karpenter.Platform == hyperv1.AWSPlatform {
		if !tagKeys.Has("karpenter.sh/discovery") {
			tags = append(tags, &ec2.Tag{
				Key:   awssdk.String("karpenter.sh/discovery"),
				Value: awssdk.String(infraID),
			})
		}
	}

	createSGResult, err := ec2Client.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{
		GroupName:   awssdk.String(awsSecurityGroupName(infraID)),
		Description: awssdk.String("default worker security group"),
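The karpenter.sh/discovery tag added above follows Karpenter's usual tag-based discovery convention, keyed here on the infra ID. As an illustrative sketch only (not code from this commit), the tagged security group could be looked up with the same aws-sdk-go types used above; the helper name is an assumption:

package example

import (
	awssdk "github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
)

// discoverSecurityGroupsByTag shows how the "karpenter.sh/discovery"=<infraID>
// tag added above can be used to find the default worker security group via a
// tag filter. Illustrative only.
func discoverSecurityGroupsByTag(ec2Client ec2iface.EC2API, infraID string) ([]*ec2.SecurityGroup, error) {
	out, err := ec2Client.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{
		Filters: []*ec2.Filter{
			{
				Name:   awssdk.String("tag:karpenter.sh/discovery"),
				Values: []*string{awssdk.String(infraID)},
			},
		},
	})
	if err != nil {
		return nil, err
	}
	return out.SecurityGroups, nil
}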
@@ -1757,6 +1757,10 @@ func (r *HostedClusterReconciler) reconcile(ctx context.Context, req ctrl.Reques
		}
	}

	if err := r.reconcileKarpenterOperator(ctx, createOrUpdate, hcluster, hcp, r.HypershiftOperatorImage, controlPlaneOperatorImage); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to reconcile karpenter operator: %w", err)
	}

	// Reconcile the Ignition server
	if !controlplaneOperatorManagesIgnitionServer {
		releaseInfo, err := r.lookupReleaseImage(ctx, hcluster, releaseProvider)
@@ -1955,6 +1959,7 @@ func reconcileHostedControlPlane(hcp *hyperv1.HostedControlPlane, hcluster *hype
	hcp.Spec.PausedUntil = hcluster.Spec.PausedUntil
	hcp.Spec.OLMCatalogPlacement = hcluster.Spec.OLMCatalogPlacement
	hcp.Spec.Autoscaling = hcluster.Spec.Autoscaling
	hcp.Spec.AutoNode = hcluster.Spec.AutoNode
	hcp.Spec.NodeSelector = hcluster.Spec.NodeSelector
	hcp.Spec.Tolerations = hcluster.Spec.Tolerations
	hcp.Spec.Labels = hcluster.Spec.Labels
116 changes: 116 additions & 0 deletions hypershift-operator/controllers/hostedcluster/karpenter.go
@@ -0,0 +1,116 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package hostedcluster

import (
	"context"
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
	karpenteroperatormanifest "github.com/openshift/hypershift/karpenter-operator/manifests"
	"github.com/openshift/hypershift/support/upsert"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/utils/ptr"
)

func (r *HostedClusterReconciler) reconcileKarpenterOperator(ctx context.Context, createOrUpdate upsert.CreateOrUpdateFN, hcluster *hyperv1.HostedCluster, hcp *hyperv1.HostedControlPlane, hypershiftOperatorImage, controlPlaneOperatorImage string) error {
	if hcluster.Spec.AutoNode == nil || hcluster.Spec.AutoNode.Provisioner.Name != hyperv1.ProvisionerKarpeneter ||
		hcluster.Spec.AutoNode.Provisioner.Karpenter.Platform != hyperv1.AWSPlatform {
		return nil
	}

	// Generate a ConfigMap with a KubeletConfig so Nodes register with the taint Karpenter expects.
	taintConfigName := "set-karpenter-taint"
	configMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      taintConfigName,
			Namespace: hcluster.Namespace,
		},
	}

	kubeletConfig := `apiVersion: machineconfiguration.openshift.io/v1
kind: KubeletConfig
metadata:
  name: set-karpenter-taint
spec:
  kubeletConfig:
    registerWithTaints:
      - key: "karpenter.sh/unregistered"
        value: "true"
        effect: "NoExecute"`

	_, err := createOrUpdate(ctx, r.Client, configMap, func() error {
		configMap.Data = map[string]string{
			"config": kubeletConfig,
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to create configmap: %w", err)
	}

	// Manage a NodePool to generate userData for Karpenter instances.
	// TODO(alberto): consider invoking the token library to manage the Karpenter userData programmatically,
	// instead of via the NodePool API.
	nodePool := &hyperv1.NodePool{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "karpenter",
			Namespace: hcluster.Namespace,
		},
	}
	_, err = createOrUpdate(ctx, r.Client, nodePool, func() error {
		nodePool.Spec = hyperv1.NodePoolSpec{
			ClusterName: hcluster.Name,
			Replicas:    ptr.To(int32(0)),
			Release:     hcluster.Spec.Release,
			Config: []corev1.LocalObjectReference{
				{
					Name: taintConfigName,
				},
			},
			Management: hyperv1.NodePoolManagement{
				UpgradeType: hyperv1.UpgradeTypeReplace,
				Replace: &hyperv1.ReplaceUpgrade{
					Strategy: hyperv1.UpgradeStrategyRollingUpdate,
					RollingUpdate: &hyperv1.RollingUpdate{
						MaxUnavailable: ptr.To(intstr.FromInt(0)),
						MaxSurge:       ptr.To(intstr.FromInt(1)),
					},
				},
				AutoRepair: false,
			},
			Platform: hyperv1.NodePoolPlatform{
				Type: hyperv1.AWSPlatform,
				AWS: &hyperv1.AWSNodePoolPlatform{
					InstanceType: "m5.large",
					Subnet: hyperv1.AWSResourceReference{
						ID: ptr.To("subnet-none"),
					},
				},
			},
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to create nodepool: %w", err)
	}
	// TODO(alberto): ensure deletion if AutoNode is disabled.

	// Run the Karpenter operator to manage its CRs on both the management and the guest side.
	if err := karpenteroperatormanifest.ReconcileKarpenterOperator(ctx, createOrUpdate, r.Client, hypershiftOperatorImage, controlPlaneOperatorImage, hcp); err != nil {
		return err
	}
	return nil
}
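
The AutoNode gate at the top of reconcileKarpenterOperator is the same condition used for the security-group tagging earlier and for the user-data secret labeling in token.go below. Purely as an illustration (the commit inlines the check in each place), it could be factored into a helper like this:

package example

import (
	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
)

// isKarpenterAWSAutoNode mirrors the gate used throughout this commit:
// AutoNode is configured, the provisioner is Karpenter, and its platform is AWS.
// Illustrative only; the commit inlines this condition rather than using a helper.
func isKarpenterAWSAutoNode(hc *hyperv1.HostedCluster) bool {
	return hc.Spec.AutoNode != nil &&
		hc.Spec.AutoNode.Provisioner.Name == hyperv1.ProvisionerKarpeneter &&
		hc.Spec.AutoNode.Provisioner.Karpenter.Platform == hyperv1.AWSPlatform
}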
23 changes: 23 additions & 0 deletions hypershift-operator/controllers/nodepool/token.go
@@ -60,6 +60,7 @@ type userData struct {
	caCert                 []byte
	ignitionServerEndpoint string
	proxy                  *configv1.Proxy
	ami                    string
}

// NewToken is the contract to create a new Token struct.
@@ -124,10 +125,19 @@ func NewToken(ctx context.Context, configGenerator *ConfigGenerator, cpoCapabili
	proxy := globalconfig.ProxyConfig()
	globalconfig.ReconcileProxyConfigWithStatusFromHostedCluster(proxy, configGenerator.hostedCluster)

	ami := ""
	if configGenerator.hostedCluster.Spec.Platform.AWS != nil {
		ami, err = defaultNodePoolAMI(configGenerator.hostedCluster.Spec.Platform.AWS.Region, configGenerator.nodePool.Spec.Arch, configGenerator.releaseImage)
		if err != nil {
			return nil, err
		}
	}

	token.userData = &userData{
		ignitionServerEndpoint: ignEndpoint,
		caCert:                 caCert,
		proxy:                  proxy,
		ami:                    ami,
	}

	return token, nil
@@ -335,6 +345,19 @@ func (t *Token) reconcileUserDataSecret(userDataSecret *corev1.Secret, token str
		userDataSecret.Annotations = make(map[string]string)
	}
	userDataSecret.Annotations[nodePoolAnnotation] = client.ObjectKeyFromObject(t.nodePool).String()
	if userDataSecret.Labels == nil {
		userDataSecret.Labels = make(map[string]string)
	}

	if t.hostedCluster.Spec.AutoNode != nil && t.hostedCluster.Spec.AutoNode.Provisioner.Name == hyperv1.ProvisionerKarpeneter &&
		t.hostedCluster.Spec.AutoNode.Provisioner.Karpenter.Platform == hyperv1.AWSPlatform {
		// TODO(alberto): prevent NodePool name collisions by adding a prefix to the Karpenter NodePool.
		if t.nodePool.GetName() == "karpenter" {
			userDataSecret.Labels[hyperv1.NodePoolLabel] = fmt.Sprintf("%s-%s", t.nodePool.Spec.ClusterName, t.nodePool.GetName())
			userDataSecret.Labels["hypershift.openshift.io/ami"] = t.userData.ami
		}
	}

	encodedCACert := base64.StdEncoding.EncodeToString(t.userData.caCert)
	encodedToken := base64.StdEncoding.EncodeToString([]byte(token))
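The labels set in reconcileUserDataSecret give downstream consumers a stable way to locate the Karpenter user-data secret and the AMI it was generated for. As a hedged sketch (the lookup function below and its arguments are assumptions; only the label scheme comes from this commit), such a consumer could list the secret like this:

package example

import (
	"context"
	"fmt"

	hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// findKarpenterUserDataSecret sketches how a consumer (for example the karpenter
// operator) could locate the user-data secret labeled above for the "karpenter"
// NodePool. Illustrative only; the resolved AMI is exposed on the same secret
// under the "hypershift.openshift.io/ami" label.
func findKarpenterUserDataSecret(ctx context.Context, c client.Client, namespace, clusterName string) (*corev1.Secret, error) {
	secretList := &corev1.SecretList{}
	if err := c.List(ctx, secretList,
		client.InNamespace(namespace),
		client.MatchingLabels{hyperv1.NodePoolLabel: fmt.Sprintf("%s-karpenter", clusterName)},
	); err != nil {
		return nil, err
	}
	if len(secretList.Items) == 0 {
		return nil, fmt.Errorf("no karpenter user-data secret found for cluster %s", clusterName)
	}
	return &secretList.Items[0], nil
}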
