Merge pull request kubernetes#23934 from mikedanese/automated-cherry-pick-of-#23929-kubernetes#23463-upstream-release-1.2

Automated cherry pick of kubernetes#23929 kubernetes#23463
zmerlynn committed Apr 6, 2016
2 parents 27d382d + feb1351 commit 5625094
Showing 2 changed files with 34 additions and 0 deletions.
5 changes: 5 additions & 0 deletions pkg/controller/daemon/controller.go
@@ -674,6 +674,9 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte
         if pod.Spec.NodeName != node.Name {
             continue
         }
+        if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
+            continue
+        }
         // ignore pods that belong to the daemonset when taking into account whether
         // a daemonset should bind to a node.
         if pds := dsc.getPodDaemonSet(pod); pds != nil && ds.Name == pds.Name {
@@ -683,11 +686,13 @@
     }
     _, notFittingCPU, notFittingMemory := predicates.CheckPodsExceedingFreeResources(pods, node.Status.Allocatable)
     if len(notFittingCPU)+len(notFittingMemory) != 0 {
+        dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: insufficient free resources", node.ObjectMeta.Name)
         return false
     }
     ports := sets.String{}
     for _, pod := range pods {
         if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
+            dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
             return false
         }
     }
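
The net effect of the three-line check above: pods that have reached a terminal phase still have records in the pod store, but they should no longer count against a node's allocatable resources when deciding whether a DaemonSet pod fits. Below is a minimal, self-contained sketch of that accounting, using simplified stand-in types rather than the real controller (which goes through predicates.CheckPodsExceedingFreeResources against node.Status.Allocatable):

package main

import "fmt"

// PodPhase and its constants stand in for the api.PodPhase values used in
// the patch (api.PodSucceeded, api.PodFailed); this is a sketch, not the
// real Kubernetes types.
type PodPhase string

const (
    PodRunning   PodPhase = "Running"
    PodSucceeded PodPhase = "Succeeded"
    PodFailed    PodPhase = "Failed"
)

type Pod struct {
    Name  string
    Phase PodPhase
    MemMB int64 // requested memory, reduced to one resource for brevity
}

// activeMemRequests mirrors the added check: terminated pods are skipped,
// so their requests no longer count against the node.
func activeMemRequests(pods []Pod) int64 {
    var total int64
    for _, pod := range pods {
        if pod.Phase == PodSucceeded || pod.Phase == PodFailed {
            continue // terminal phase: its resources are free again
        }
        total += pod.MemMB
    }
    return total
}

func main() {
    // Same shape as the new test below: a node with 100M allocatable that
    // holds one already-succeeded pod which had requested 75M.
    pods := []Pod{{Name: "finished-job", Phase: PodSucceeded, MemMB: 75}}
    allocatable, daemonRequest := int64(100), int64(75)

    used := activeMemRequests(pods)
    fmt.Printf("in use: %dM, daemon pod fits: %v\n", used, used+daemonRequest <= allocatable)
    // Without the phase check this reports 75M in use and placement is
    // refused; with it, the daemon pod launches.
}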
29 changes: 29 additions & 0 deletions pkg/controller/daemon/controller_test.go
@@ -240,6 +240,35 @@ func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
     syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
 }
 
+func TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
+    podSpec := api.PodSpec{
+        NodeName: "too-much-mem",
+        Containers: []api.Container{{
+            Resources: api.ResourceRequirements{
+                Requests: api.ResourceList{
+                    api.ResourceMemory: resource.MustParse("75M"),
+                    api.ResourceCPU:    resource.MustParse("75m"),
+                },
+            },
+        }},
+    }
+    manager, podControl := newTestController()
+    node := newNode("too-much-mem", nil)
+    node.Status.Allocatable = api.ResourceList{
+        api.ResourceMemory: resource.MustParse("100M"),
+        api.ResourceCPU:    resource.MustParse("200m"),
+    }
+    manager.nodeStore.Add(node)
+    manager.podStore.Add(&api.Pod{
+        Spec:   podSpec,
+        Status: api.PodStatus{Phase: api.PodSucceeded},
+    })
+    ds := newDaemonSet("foo")
+    ds.Spec.Template.Spec = podSpec
+    manager.dsStore.Add(ds)
+    syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
+}
+
 // DaemonSets should place onto nodes with sufficient free resource
 func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
     podSpec := api.PodSpec{
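
To exercise just the new case against a checkout of this release branch, the standard test-runner filter should work (assuming a working Go toolchain and this era's vendored dependencies; the package path matches the file shown above):

go test ./pkg/controller/daemon/ -run TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod -v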
