Skip to content

Commit

Permalink
Integration test to verify alloc works
Browse files Browse the repository at this point in the history
Add integration test to verify that allocatable-aware scheduling works
  • Loading branch information
resouer committed Jan 20, 2016
1 parent e64fe82 commit 0202a20
Show file tree
Hide file tree
Showing 2 changed files with 116 additions and 1 deletion.
1 change: 0 additions & 1 deletion plugin/pkg/scheduler/algorithm/predicates/predicates.go
Original file line number Diff line number Diff line change
Expand Up @@ -305,7 +305,6 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
}

allocatable := info.Status.Allocatable

if int64(len(existingPods))+1 > allocatable.Pods().Value() {
glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), allocatable.Pods().Value())
return false, ErrExceededMaxPodNumber
Expand Down
116 changes: 116 additions & 0 deletions test/integration/scheduler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -449,3 +449,119 @@ func createPod(name string, annotation map[string]string) *api.Pod {
},
}
}

// TestAllocatable verifies that the scheduler works correctly regardless of
// whether the kubelet reports Status.Allocatable:
//   - When Allocatable is absent, the API server defaults it to Capacity, so a
//     pod fitting within Capacity must schedule.
//   - When Allocatable is present and smaller than the pod's request, the pod
//     must NOT schedule, even though it still fits within Capacity.
func TestAllocatable(t *testing.T) {
	framework.DeleteAllEtcdKeys()

	var m *master.Master
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	defer s.Close()

	masterConfig := framework.NewIntegrationTestMasterConfig()
	m = master.New(masterConfig)

	// 1. Create and start the default scheduler.
	restClient := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})

	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		t.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
	scheduler.New(schedulerConfig).Run()
	// The default scheduler is stopped when the test returns.
	defer close(schedulerConfig.StopEverything)

	// 2. Create a node WITHOUT Status.Allocatable set; the API server will
	// default Allocatable to Capacity for such nodes.
	node := &api.Node{
		ObjectMeta: api.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
		Spec:       api.NodeSpec{Unschedulable: false},
		Status: api.NodeStatus{
			Capacity: api.ResourceList{
				api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
				api.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
				api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
			},
		},
	}

	allocNode, err := restClient.Nodes().Create(node)
	if err != nil {
		t.Fatalf("Failed to create node: %v", err)
	}

	// 3. Create a pod whose requests (20m CPU / 20 memory) fit within the
	// node's Capacity (30m CPU / 30 memory).
	podResource := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "pod-test-allocatable"},
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "container",
					Image: "kubernetes/pause:go",
					Resources: api.ResourceRequirements{
						Requests: api.ResourceList{
							api.ResourceCPU:    *resource.NewMilliQuantity(20, resource.DecimalSI),
							api.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
						},
					},
				},
			},
		},
	}

	testAllocPod, err := restClient.Pods(api.NamespaceDefault).Create(podResource)
	if err != nil {
		t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
	}

	// 4. The pod should be scheduled: with no Allocatable reported, the
	// API server uses Capacity as Allocatable, and the pod fits.
	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod.Namespace, testAllocPod.Name))
	if err != nil {
		t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
	} else {
		t.Logf("Test allocatable unawareness: %s Pod scheduled", testAllocPod.Name)
	}

	// 5. Make the node allocatable-aware. Allocatable (10m CPU / 10 memory)
	// is now LESS than the pod's request (20m CPU / 20 memory), while
	// Capacity alone would still fit it.
	allocNode.Status = api.NodeStatus{
		Capacity: api.ResourceList{
			api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
			api.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
		},
		Allocatable: api.ResourceList{
			api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
			api.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
		},
	}

	if _, err := restClient.Nodes().UpdateStatus(allocNode); err != nil {
		t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
	}

	// Remove the first pod so its usage does not affect the second attempt.
	if err := restClient.Pods(api.NamespaceDefault).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
		t.Fatalf("Failed to remove first resource pod: %v", err)
	}

	// 6. Create another pod with a different name but the same resource request.
	podResource.ObjectMeta.Name = "pod-test-allocatable2"
	testAllocPod2, err := restClient.Pods(api.NamespaceDefault).Create(podResource)
	if err != nil {
		t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
	}

	// 7. The pod should NOT be scheduled, since it requests more than
	// Allocatable; the poll is expected to time out.
	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod2.Namespace, testAllocPod2.Name))
	if err == nil {
		// A nil error means the pod was scheduled, which is a failure here.
		t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly", testAllocPod2.Name)
	} else {
		t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
	}
}

1 comment on commit 0202a20

@k8s-teamcity-mesosphere

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

TeamCity OSS :: Kubernetes Mesos :: 4 - Smoke Tests Build 12037 outcome was FAILURE
Summary: Exit code 1 (new) Build time: 00:03:05

Please sign in to comment.