[AppArmor] Hold bad AppArmor pods in pending rather than rejecting #35342
Changes from all commits
```diff
@@ -751,7 +751,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 		return nil, fmt.Errorf("failed to initialize eviction manager: %v", err)
 	}
 	klet.evictionManager = evictionManager
-	klet.AddPodAdmitHandler(evictionAdmitHandler)
+	klet.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)

 	// add sysctl admission
 	runtimeSupport, err := sysctl.NewRuntimeAdmitHandler(klet.containerRuntime)
```
```diff
@@ -769,9 +769,9 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 	if err != nil {
 		return nil, err
 	}
-	klet.AddPodAdmitHandler(runtimeSupport)
-	klet.AddPodAdmitHandler(safeWhitelist)
-	klet.AddPodAdmitHandler(unsafeWhitelist)
+	klet.admitHandlers.AddPodAdmitHandler(runtimeSupport)
+	klet.admitHandlers.AddPodAdmitHandler(safeWhitelist)
+	klet.admitHandlers.AddPodAdmitHandler(unsafeWhitelist)

 	// enable active deadline handler
 	activeDeadlineHandler, err := newActiveDeadlineHandler(klet.statusManager, kubeDeps.Recorder, klet.clock)
```
```diff
@@ -781,14 +781,15 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
 	klet.AddPodSyncLoopHandler(activeDeadlineHandler)
 	klet.AddPodSyncHandler(activeDeadlineHandler)

-	klet.appArmorValidator = apparmor.NewValidator(kubeCfg.ContainerRuntime)
-	klet.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
-	klet.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(klet.getNodeAnyWay))
+	klet.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(klet.getNodeAnyWay))
 	// apply functional Option's
 	for _, opt := range kubeDeps.Options {
 		opt(klet)
 	}

+	klet.appArmorValidator = apparmor.NewValidator(kubeCfg.ContainerRuntime)
+	klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewAppArmorAdmitHandler(klet.appArmorValidator))
+
 	// Finally, put the most recent version of the config on the Kubelet, so
 	// people can see how it was configured.
 	klet.kubeletConfiguration = *kubeCfg
```
```diff
@@ -1046,7 +1047,13 @@ type Kubelet struct {

 	// TODO: think about moving this to be centralized in PodWorkers in follow-on.
 	// the list of handlers to call during pod admission.
-	lifecycle.PodAdmitHandlers
+	admitHandlers lifecycle.PodAdmitHandlers

+	// softAdmitHandlers are applied to the pod after it is admitted by the Kubelet, but before it is
+	// run. A pod rejected by a softAdmitHandler will be left in a Pending state indefinitely. If a
+	// rejected pod should not be recreated, or the scheduler is not aware of the rejection rule, the
+	// admission rule should be applied by a softAdmitHandler.
+	softAdmitHandlers lifecycle.PodAdmitHandlers
+
 	// the list of handlers to call during pod sync loop.
 	lifecycle.PodSyncLoopHandlers
```
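For readers unfamiliar with the handler interface: both lists hold lifecycle.PodAdmitHandler values, so a soft admit handler is just an ordinary admit handler registered on the new list. A minimal sketch, assuming the lifecycle package of this era; the type name and annotation key are hypothetical, invented purely for illustration:

```go
package example

import "k8s.io/kubernetes/pkg/kubelet/lifecycle"

// blockAnnotatedPods is a hypothetical soft admit handler. Registering it
// via klet.softAdmitHandlers.AddPodAdmitHandler would leave matching pods
// Pending rather than failing them outright.
type blockAnnotatedPods struct{}

func (blockAnnotatedPods) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
	// Reject pods carrying a made-up annotation; everything else is admitted.
	if _, ok := attrs.Pod.Annotations["example.io/blocked"]; ok {
		return lifecycle.PodAdmitResult{
			Admit:   false,
			Reason:  "ExampleBlocked",
			Message: "pod carries the hypothetical example.io/blocked annotation",
		}
	}
	return lifecycle.PodAdmitResult{Admit: true}
}
```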
```diff
@@ -1380,17 +1387,42 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
 		metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
 	}

+	runnable := kl.canRunPod(pod)
+	if !runnable.Admit {
+		// Pod is not runnable; update the Pod and Container statuses to why.
+		apiPodStatus.Reason = runnable.Reason
+		apiPodStatus.Message = runnable.Message
+		// Waiting containers are not creating.
+		const waitingReason = "Blocked"
+		for _, cs := range apiPodStatus.InitContainerStatuses {
+			if cs.State.Waiting != nil {
+				cs.State.Waiting.Reason = waitingReason
+			}
+		}
+		for _, cs := range apiPodStatus.ContainerStatuses {
+			if cs.State.Waiting != nil {
+				cs.State.Waiting.Reason = waitingReason
+			}
+		}
+	}
+
 	// Update status in the status manager
 	kl.statusManager.SetPodStatus(pod, apiPodStatus)

 	// Kill pod if it should not be running
-	if errOuter := canRunPod(pod); errOuter != nil || pod.DeletionTimestamp != nil || apiPodStatus.Phase == api.PodFailed {
-		if errInner := kl.killPod(pod, nil, podStatus, nil); errInner != nil {
-			errOuter = fmt.Errorf("error killing pod: %v", errInner)
-			utilruntime.HandleError(errOuter)
+	if !runnable.Admit || pod.DeletionTimestamp != nil || apiPodStatus.Phase == api.PodFailed {
+		var syncErr error
+		if err := kl.killPod(pod, nil, podStatus, nil); err != nil {
+			syncErr = fmt.Errorf("error killing pod: %v", err)
+			utilruntime.HandleError(syncErr)
+		} else {
+			if !runnable.Admit {
+				// There was no error killing the pod, but the pod cannot be run.
+				// Return an error to signal that the sync loop should back off.
+				syncErr = fmt.Errorf("pod cannot be run: %s", runnable.Message)
+			}
 		}
-		// there was no error killing the pod, but the pod cannot be run, so we return that err (if any)
-		return errOuter
+		return syncErr
 	}

 	// Create Mirror Pod for Static Pod if it doesn't already exist
```
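To make the net effect concrete, here is a sketch of the status a blocked pod ends up with after this change, using the same api package as the diff. The specific Reason and Message values are illustrative, with AppArmor as the motivating example from the PR title:

```go
package example

import "k8s.io/kubernetes/pkg/api"

// podStatusWhenBlocked sketches the status syncPod now produces for a pod
// rejected by a soft admit handler: the phase stays Pending, Reason and
// Message come from the rejecting handler, and waiting containers are
// marked "Blocked" instead of being created.
func podStatusWhenBlocked() api.PodStatus {
	return api.PodStatus{
		Phase:   api.PodPending,
		Reason:  "AppArmor",                     // runnable.Reason (illustrative)
		Message: "Cannot enforce AppArmor: ...", // runnable.Message (illustrative)
		ContainerStatuses: []api.ContainerStatus{{
			Name: "app",
			State: api.ContainerState{
				Waiting: &api.ContainerStateWaiting{Reason: "Blocked"},
			},
		}},
	}
}
```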
```diff
@@ -1574,7 +1606,7 @@ func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, str
 	// TODO: move out of disk check into a pod admitter
 	// TODO: out of resource eviction should have a pod admitter call-out
 	attrs := &lifecycle.PodAdmitAttributes{Pod: pod, OtherPods: pods}
-	for _, podAdmitHandler := range kl.PodAdmitHandlers {
+	for _, podAdmitHandler := range kl.admitHandlers {
 		if result := podAdmitHandler.Admit(attrs); !result.Admit {
 			return false, result.Reason, result.Message
 		}
```
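For contrast, a rough sketch of how the two handler lists are consumed after this PR; the call sites are simplified, not verbatim from the diff, and rejectPod is the kubelet's existing helper for failing a pod at admission time (signature assumed from this era's kubelet.go):

```go
// Hard admission (admitHandlers): a rejection fails the pod outright.
if ok, reason, message := kl.canAdmitPod(otherPods, pod); !ok {
	kl.rejectPod(pod, reason, message) // pod transitions to Failed
}

// Soft admission (softAdmitHandlers, via canRunPod added below): a
// rejection does not fail the pod. syncPod holds it Pending, marks its
// containers Waiting with reason "Blocked", and retries on later syncs.
if result := kl.canRunPod(pod); !result.Admit {
	// handled inside syncPod, as shown in the hunk above
}
```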
```diff
@@ -1589,6 +1621,29 @@ func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, str
 	return true, "", ""
 }

+func (kl *Kubelet) canRunPod(pod *api.Pod) lifecycle.PodAdmitResult {
+	attrs := &lifecycle.PodAdmitAttributes{Pod: pod}
+	// Get "OtherPods". Rejected pods are failed, so only include admitted pods that are alive.
+	attrs.OtherPods = kl.filterOutTerminatedPods(kl.podManager.GetPods())
+
+	for _, handler := range kl.softAdmitHandlers {
+		if result := handler.Admit(attrs); !result.Admit {
+			return result
+		}
+	}
+
+	// TODO: Refactor as a soft admit handler.
+	if err := canRunPod(pod); err != nil {
+		return lifecycle.PodAdmitResult{
+			Admit:   false,
+			Reason:  "Forbidden",
+			Message: err.Error(),
+		}
+	}
+
+	return lifecycle.PodAdmitResult{Admit: true}
+}
+
 // syncLoop is the main loop for processing changes. It watches for changes from
 // three channels (file, apiserver, and http) and creates a union of them. For
 // any new change seen, will run a sync against desired state and running state. If
```

Reviewer (on the new canRunPod method):
Calling this new method canRunPod, the same name as the existing pkg/kubelet/util.go::canRunPod(...) used below, is very confusing to me.

Author:
Ack. I left a TODO to get rid of that other method. Do you have a suggestion for a better name?
Reviewer (on the early return in the kill path):
Does runningPod here include the PodInfraContainer too? I think it does, but I want to be sure. Otherwise, returning early would leak the pod infra container.

Author:
Yes, it does (see line 1424 below). This method is a no-op if len(runningPod.Containers) == 0; the early return is just an optimization.
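A sketch of the no-op guard the author is describing; this code is not part of the diff and the surrounding kill-path details are assumed:

```go
// Because runningPod.Containers includes the pod infra container, an
// empty slice means nothing at all is running for this pod, so an early
// return here cannot leak the infra container.
if len(runningPod.Containers) == 0 {
	return nil
}
```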