-
Notifications
You must be signed in to change notification settings - Fork 328
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
HOSTEDCP-2220: Autonode karpenter #5279
Merged
openshift-merge-bot
merged 6 commits into
openshift:main
from
enxebre:autonode-karpenter
Dec 17, 2024
+13,577
−9
Merged
Changes from 1 commit
Commits
Show all changes
6 commits
Select commit
Hold shift + click to select a range
18ffc1b
Run make update
enxebre e949256
Add karpenter operator
enxebre 2c4aee6
Let the HO manage the Karpenter Operator Deployment
enxebre 258e974
Add AutoNode Karpenter feature gated API support
enxebre 3ef7e84
Add a flag to run with --auto-node
enxebre f596b35
Include karpenter-operator binary within HO image
enxebre File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Add karpenter operator
This introduces a self-contained controller that knows how to run Karpenter on the management side, watching a guest cluster and managing Karpenter CRDs.
- Loading branch information
commit e949256bc5591e1c733e5a6eb6d51c7065e5040c
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,13 @@ | ||
package assets | ||
|
||
import ( | ||
"embed" | ||
) | ||
|
||
// f embeds every YAML manifest in this package directory (the Karpenter CRD
// manifests) into the binary at build time.
//
//go:embed *.yaml
var f embed.FS

// ReadFile reads and returns the content of the named file from the embedded
// filesystem. The name must match one of the embedded *.yaml files.
func ReadFile(name string) ([]byte, error) {
	return f.ReadFile(name)
}
739 changes: 739 additions & 0 deletions
739
karpenter-operator/controllers/karpenter/assets/karpenter.k8s.aws_ec2nodeclasses.yaml
Large diffs are not rendered by default.
Oops, something went wrong.
382 changes: 382 additions & 0 deletions
382
karpenter-operator/controllers/karpenter/assets/karpenter.sh_nodeclaims.yaml
Large diffs are not rendered by default.
Oops, something went wrong.
507 changes: 507 additions & 0 deletions
507
karpenter-operator/controllers/karpenter/assets/karpenter.sh_nodepools.yaml
Large diffs are not rendered by default.
Oops, something went wrong.
327 changes: 327 additions & 0 deletions
327
karpenter-operator/controllers/karpenter/karpenter_controller.go
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,327 @@ | ||
package karpenter | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"sort" | ||
|
||
hyperv1 "github.com/openshift/hypershift/api/hypershift/v1beta1" | ||
"github.com/openshift/hypershift/karpenter-operator/controllers/karpenter/assets" | ||
supportassets "github.com/openshift/hypershift/support/assets" | ||
"github.com/openshift/hypershift/support/upsert" | ||
appsv1 "k8s.io/api/apps/v1" | ||
corev1 "k8s.io/api/core/v1" | ||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" | ||
apierrors "k8s.io/apimachinery/pkg/api/errors" | ||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||
"k8s.io/apimachinery/pkg/labels" | ||
"k8s.io/apimachinery/pkg/runtime/schema" | ||
utilerrors "k8s.io/apimachinery/pkg/util/errors" | ||
ctrl "sigs.k8s.io/controller-runtime" | ||
"sigs.k8s.io/controller-runtime/pkg/client" | ||
"sigs.k8s.io/controller-runtime/pkg/cluster" | ||
"sigs.k8s.io/controller-runtime/pkg/controller" | ||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" | ||
"sigs.k8s.io/controller-runtime/pkg/event" | ||
"sigs.k8s.io/controller-runtime/pkg/handler" | ||
"sigs.k8s.io/controller-runtime/pkg/source" | ||
) | ||
|
||
const (
	// karpenterFinalizer guards HostedControlPlane deletion until this
	// controller has a chance to clean up (removed in Reconcile on deletion).
	karpenterFinalizer = "hypershift.openshift.io/karpenter-finalizer"
	// userDataAMILabel is a label set in the userData secret generated for karpenter instances.
	userDataAMILabel = "hypershift.openshift.io/ami"
)

// Karpenter CRDs loaded from the embedded assets at package init.
// MustCRD panics on a missing/malformed asset, so a bad build fails fast.
var (
	crdEC2NodeClass = supportassets.MustCRD(assets.ReadFile, "karpenter.k8s.aws_ec2nodeclasses.yaml")
	crdNodePool     = supportassets.MustCRD(assets.ReadFile, "karpenter.sh_nodepools.yaml")
	crdNodeClaim    = supportassets.MustCRD(assets.ReadFile, "karpenter.sh_nodeclaims.yaml")
)

// Reconciler runs Karpenter management side for one HostedControlPlane:
// it installs the Karpenter CRDs in the guest cluster, reconciles
// EC2NodeClass owned fields and a default EC2NodeClass, and manages the
// karpenter Deployment in the management cluster.
type Reconciler struct {
	// ManagementClient reads/writes objects in the management cluster.
	ManagementClient client.Client
	// GuestClient reads/writes objects in the guest (hosted) cluster.
	GuestClient client.Client
	// Namespace is the management-cluster namespace hosting the HostedControlPlane.
	Namespace string
	// ControlPlaneOperatorImage is the image used for the operand.
	// NOTE(review): not referenced in this file — presumably consumed by
	// reconcileKarpenter (defined elsewhere); confirm.
	ControlPlaneOperatorImage string
	upsert.CreateOrUpdateProvider
}
|
||
// SetupWithManager wires the controller: it installs the Karpenter CRDs in the
// guest cluster up front (so the CRD watch below can be established), then
// registers watches on guest-side CRDs and EC2NodeClasses and on the
// management-side karpenter Deployment, and finally triggers a single initial
// reconcile through a channel source.
func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, managementCluster cluster.Cluster) error {
	r.ManagementClient = managementCluster.GetClient()
	r.GuestClient = mgr.GetClient()
	r.CreateOrUpdateProvider = upsert.New(false)

	// First install the CRDs so we can create a watch below.
	// onlyCreate=true: caches are not started yet, so use plain Create calls.
	if err := r.reconcileCRDs(ctx, true); err != nil {
		return err
	}

	c, err := controller.New("karpenter", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return fmt.Errorf("failed to construct controller: %w", err)
	}

	// Watch CRDs guest side.
	if err := c.Watch(source.Kind[client.Object](mgr.GetCache(), &apiextensionsv1.CustomResourceDefinition{}, handler.EnqueueRequestsFromMapFunc(
		func(ctx context.Context, o client.Object) []ctrl.Request {
			// Only watch our Karpenter CRDs
			switch o.GetName() {
			case "ec2nodeclasses.karpenter.k8s.aws",
				"nodepools.karpenter.sh",
				"nodeclaims.karpenter.sh":
				// Only the namespace is set on the request; Reconcile ignores
				// the request name and looks up the HCP itself.
				return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: r.Namespace}}}
			}
			return nil
		}))); err != nil {
		return fmt.Errorf("failed to watch CRDs: %w", err)
	}

	// Watch EC2NodeClass guest side.
	// Unstructured is used because the karpenter API types are not vendored.
	if err := c.Watch(source.Kind[client.Object](mgr.GetCache(), &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "karpenter.k8s.aws/v1",
		"kind":       "EC2NodeClass",
	}}, &handler.EnqueueRequestForObject{})); err != nil {
		return fmt.Errorf("failed to watch EC2NodeClass: %w", err)
	}

	// Watch the karpenter Deployment management side.
	if err := c.Watch(source.Kind[client.Object](managementCluster.GetCache(), &appsv1.Deployment{}, handler.EnqueueRequestsFromMapFunc(
		func(ctx context.Context, o client.Object) []ctrl.Request {
			// Filter to the single "karpenter" Deployment in our namespace.
			if o.GetNamespace() != r.Namespace || o.GetName() != "karpenter" {
				return nil
			}
			return []ctrl.Request{{NamespacedName: client.ObjectKeyFromObject(o)}}
		}))); err != nil {
		return fmt.Errorf("failed to watch Deployment: %w", err)
	}

	// Trigger initial sync.
	initialSync := make(chan event.GenericEvent)
	if err := c.Watch(source.Channel(initialSync, &handler.EnqueueRequestForObject{})); err != nil {
		return fmt.Errorf("failed to watch initial sync channel: %w", err)
	}
	// NOTE(review): this send blocks until the channel source is started and
	// has no shutdown path — the goroutine leaks if the manager never starts.
	// Confirm this is acceptable for the process lifecycle.
	go func() {
		initialSync <- event.GenericEvent{Object: &hyperv1.HostedControlPlane{}}
	}()

	return nil
}
|
||
// Reconcile drives the Karpenter operand for the single HostedControlPlane in
// r.Namespace: it manages a finalizer on the HCP, propagates the latest
// userData/AMI into existing EC2NodeClasses, ensures a default EC2NodeClass,
// the karpenter Deployment, and the Karpenter CRDs. The request contents are
// ignored; the HCP is looked up fresh every pass.
func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("Reconciling", "req", req)

	hcp, err := r.getHCP(ctx)
	if err != nil {
		// NOTE(review): getHCP returns a plain fmt.Errorf when no HCP exists,
		// so this IsNotFound branch looks unreachable — confirm intent.
		if apierrors.IsNotFound(err) {
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}
	if hcp.DeletionTimestamp != nil {
		// TODO(alberto): implement deletion. E.g. loop over nodeClaims delete them, wait and delete karpenter deployment.
		if controllerutil.ContainsFinalizer(hcp, karpenterFinalizer) {
			originalHCP := hcp.DeepCopy()
			controllerutil.RemoveFinalizer(hcp, karpenterFinalizer)
			// Optimistic-lock patch: fails (and triggers a retry) if the HCP
			// was modified concurrently.
			if err := r.ManagementClient.Patch(ctx, hcp, client.MergeFromWithOptions(originalHCP, client.MergeFromWithOptimisticLock{})); err != nil {
				return ctrl.Result{}, fmt.Errorf("failed to remove finalizer from cluster: %w", err)
			}
		}
		return ctrl.Result{}, nil
	}
	// Ensure the finalizer is present before managing any dependent resources.
	if !controllerutil.ContainsFinalizer(hcp, karpenterFinalizer) {
		originalHCP := hcp.DeepCopy()
		controllerutil.AddFinalizer(hcp, karpenterFinalizer)
		if err := r.ManagementClient.Patch(ctx, hcp, client.MergeFromWithOptions(originalHCP, client.MergeFromWithOptimisticLock{})); err != nil {
			return ctrl.Result{}, fmt.Errorf("failed to add finalizer to hostedControlPlane: %w", err)
		}
	}

	// TODO(alberto):
	// - reconcile validatingAdmissionPolicy to enforce shared ownership.
	// - Watch userDataSecret.
	// - Solve token rotation causing drift.
	// - CSR approval.

	// Latest token/userData secret for the karpenter NodePool; its payload and
	// AMI label are pushed into the EC2NodeClasses below.
	userDataSecret, err := r.getUserDataSecret(ctx, hcp)
	if err != nil {
		return ctrl.Result{}, err
	}

	if err := r.reconcileEC2NodeClassOwnedFields(ctx, userDataSecret); err != nil {
		return ctrl.Result{}, err
	}

	if err := r.reconcileEC2NodeClassDefault(ctx, userDataSecret, hcp); err != nil {
		return ctrl.Result{}, err
	}

	if err := r.reconcileKarpenter(ctx, hcp); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to reconcile karpenter deployment: %w", err)
	}

	// Keep the CRDs up to date on every pass (full CreateOrUpdate this time).
	if err := r.reconcileCRDs(ctx, false); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}
|
||
// reconcileCRDs reconcile the Karpenter CRDs, if onlyCreate is true it uses an only write non cached client. | ||
func (r *Reconciler) reconcileCRDs(ctx context.Context, onlyCreate bool) error { | ||
log := ctrl.LoggerFrom(ctx) | ||
|
||
errs := []error{} | ||
var op controllerutil.OperationResult | ||
var err error | ||
for _, crd := range []*apiextensionsv1.CustomResourceDefinition{ | ||
crdEC2NodeClass, | ||
crdNodePool, | ||
crdNodeClaim, | ||
} { | ||
if onlyCreate { | ||
if err := r.GuestClient.Create(ctx, crd); err != nil { | ||
if !apierrors.IsAlreadyExists(err) { | ||
errs = append(errs, err) | ||
} | ||
} | ||
} else { | ||
op, err = r.CreateOrUpdate(ctx, r.GuestClient, crd, func() error { | ||
return nil | ||
}) | ||
if err != nil { | ||
errs = append(errs, err) | ||
} | ||
|
||
} | ||
} | ||
if err := utilerrors.NewAggregate(errs); err != nil { | ||
return fmt.Errorf("failed to reconcile CRDs: %w", err) | ||
} | ||
log.Info("Reconciled CRDs", "op", op) | ||
|
||
return nil | ||
} | ||
|
||
func (r *Reconciler) reconcileEC2NodeClassOwnedFields(ctx context.Context, userDataSecret *corev1.Secret) error { | ||
log := ctrl.LoggerFrom(ctx) | ||
|
||
ec2NodeClassList := &unstructured.UnstructuredList{} | ||
ec2NodeClassList.SetGroupVersionKind(schema.GroupVersionKind{ | ||
Group: "karpenter.k8s.aws", | ||
Version: "v1", | ||
Kind: "EC2NodeClassList", | ||
}) | ||
err := r.GuestClient.List(ctx, ec2NodeClassList) | ||
if err != nil { | ||
return fmt.Errorf("failed to get EC2NodeClassList: %w", err) | ||
} | ||
|
||
errs := []error{} | ||
for _, ec2NodeClass := range ec2NodeClassList.Items { | ||
ec2NodeClass.SetGroupVersionKind(schema.GroupVersionKind{ | ||
Group: "karpenter.k8s.aws", | ||
Version: "v1", | ||
Kind: "EC2NodeClass", | ||
}) | ||
op, err := r.CreateOrUpdate(ctx, r.GuestClient, &ec2NodeClass, func() error { | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["userData"] = string(userDataSecret.Data["value"]) | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["amiFamily"] = "Custom" | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["amiSelectorTerms"] = []map[string]interface{}{ | ||
{ | ||
"id": string(userDataSecret.Labels[userDataAMILabel]), | ||
}, | ||
} | ||
return nil | ||
}) | ||
if err != nil { | ||
errs = append(errs, err) | ||
} | ||
if err == nil { | ||
log.Info("Set managed fields in ec2NodeClass", "ec2NodeClass", ec2NodeClass.GetName(), "op", op) | ||
} | ||
} | ||
if err := utilerrors.NewAggregate(errs); err != nil { | ||
return fmt.Errorf("failed to update EC2NodeClass: %w", err) | ||
} | ||
return nil | ||
} | ||
|
||
func (r *Reconciler) reconcileEC2NodeClassDefault(ctx context.Context, userDataSecret *corev1.Secret, hcp *hyperv1.HostedControlPlane) error { | ||
log := ctrl.LoggerFrom(ctx) | ||
|
||
// Create an unstructured object for the EC2NodeClass | ||
ec2NodeClass := &unstructured.Unstructured{} | ||
ec2NodeClass.SetGroupVersionKind(schema.GroupVersionKind{ | ||
Group: "karpenter.k8s.aws", | ||
Version: "v1", | ||
Kind: "EC2NodeClass", | ||
}) | ||
ec2NodeClass.SetName("default") | ||
|
||
op, err := r.CreateOrUpdate(ctx, r.GuestClient, ec2NodeClass, func() error { | ||
ec2NodeClass.Object["spec"] = map[string]interface{}{} | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["role"] = "KarpenterNodeRole-agl" // TODO(alberto): set a convention for this e.g. openshift-karpenter-infraID | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["userData"] = string(userDataSecret.Data["value"]) | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["amiFamily"] = "Custom" | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["amiSelectorTerms"] = []map[string]interface{}{ | ||
{ | ||
"id": string(userDataSecret.Labels[userDataAMILabel]), | ||
}, | ||
} | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["subnetSelectorTerms"] = []map[string]interface{}{ | ||
{ | ||
"tags": map[string]interface{}{ | ||
"karpenter.sh/discovery": hcp.Spec.InfraID, | ||
}, | ||
}, | ||
} | ||
ec2NodeClass.Object["spec"].(map[string]interface{})["securityGroupSelectorTerms"] = []map[string]interface{}{ | ||
{ | ||
"tags": map[string]interface{}{ | ||
"karpenter.sh/discovery": hcp.Spec.InfraID, | ||
}, | ||
}, | ||
} | ||
return nil | ||
}) | ||
if err != nil { | ||
return fmt.Errorf("failed to reconcile default EC2NodeClass: %w", err) | ||
} | ||
log.Info("Reconciled default EC2NodeClass", "op", op) | ||
return nil | ||
} | ||
|
||
func (r *Reconciler) getUserDataSecret(ctx context.Context, hcp *hyperv1.HostedControlPlane) (*corev1.Secret, error) { | ||
labelSelector := labels.SelectorFromSet(labels.Set{hyperv1.NodePoolLabel: fmt.Sprintf("%s-karpenter", hcp.GetName())}) | ||
listOptions := &client.ListOptions{ | ||
LabelSelector: labelSelector, | ||
Namespace: r.Namespace, | ||
} | ||
secretList := &corev1.SecretList{} | ||
err := r.ManagementClient.List(ctx, secretList, listOptions) | ||
if err != nil { | ||
return nil, fmt.Errorf("failed to list secrets: %w", err) | ||
} | ||
|
||
sort.Slice(secretList.Items, func(i, j int) bool { | ||
return secretList.Items[i].CreationTimestamp.After(secretList.Items[j].CreationTimestamp.Time) | ||
}) | ||
if len(secretList.Items) < 1 { | ||
return nil, fmt.Errorf("expected 1 secret, got 0") | ||
} | ||
return &secretList.Items[0], err | ||
} | ||
|
||
func (r *Reconciler) getHCP(ctx context.Context) (*hyperv1.HostedControlPlane, error) { | ||
hcpList := &hyperv1.HostedControlPlaneList{} | ||
if err := r.ManagementClient.List(ctx, hcpList, client.InNamespace(r.Namespace)); err != nil { | ||
return nil, err | ||
} | ||
if len(hcpList.Items) == 0 { | ||
return nil, fmt.Errorf("failed to find HostedControlPlane in namespace %s", r.Namespace) | ||
} | ||
|
||
return &hcpList.Items[0], nil | ||
} |
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I can't find the implementation of this function anywhere in the PR :)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
6953f06#diff-d716aa33fdf60807fee987397201d663079fdd4fdf6d50bd87723b8f6c525f3cR341