Merge pull request #106747 from ahg-g/ahg-test
Added an integration test for NodeResourcesFit scoring
k8s-ci-robot authored Dec 8, 2021
2 parents 022d49d + 33a04dc commit d7f8234
Showing 9 changed files with 161 additions and 55 deletions.
2 changes: 1 addition & 1 deletion pkg/scheduler/factory.go
@@ -120,7 +120,7 @@ func (c *Configurator) create() (*Scheduler, error) {
prof := &c.profiles[i]
var found = false
for k := range prof.PluginConfig {
if prof.PluginConfig[k].Name == noderesources.FitName {
if prof.PluginConfig[k].Name == noderesources.Name {
// Update the existing args
pc := &prof.PluginConfig[k]
args, ok := pc.Args.(*schedulerapi.NodeResourcesFitArgs)

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions pkg/scheduler/framework/plugins/noderesources/fit.go
@@ -38,12 +38,12 @@ var _ framework.EnqueueExtensions = &Fit{}
var _ framework.ScorePlugin = &Fit{}

const (
// FitName is the name of the plugin used in the plugin registry and configurations.
FitName = names.NodeResourcesFit
// Name is the name of the plugin used in the plugin registry and configurations.
Name = names.NodeResourcesFit

// preFilterStateKey is the key in CycleState to NodeResourcesFit pre-computed data.
// Using the name of the plugin will likely help us avoid collisions with other plugins.
preFilterStateKey = "PreFilter" + FitName
preFilterStateKey = "PreFilter" + Name
)

// nodeResourceStrategyTypeMap maps strategy to scorer implementation
@@ -100,7 +100,7 @@ func (s *preFilterState) Clone() framework.StateData {

// Name returns name of the plugin. It is used in logs, etc.
func (f *Fit) Name() string {
return FitName
return Name
}

// NewFit initializes a new plugin and returns it.
2 changes: 1 addition & 1 deletion pkg/scheduler/framework/plugins/registry.go
@@ -62,7 +62,7 @@ func NewInTreeRegistry() runtime.Registry {
nodeaffinity.Name: nodeaffinity.New,
podtopologyspread.Name: podtopologyspread.New,
nodeunschedulable.Name: nodeunschedulable.New,
noderesources.FitName: runtime.FactoryAdapter(fts, noderesources.NewFit),
noderesources.Name: runtime.FactoryAdapter(fts, noderesources.NewFit),
noderesources.BalancedAllocationName: runtime.FactoryAdapter(fts, noderesources.NewBalancedAllocation),
volumebinding.Name: runtime.FactoryAdapter(fts, volumebinding.New),
volumerestrictions.Name: runtime.FactoryAdapter(fts, volumerestrictions.New),
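
For illustration only (not part of this commit): the registry above is the single place that maps a plugin's exported name constant to its factory, which is why the FitName to Name rename has to touch it, and feature-gate-aware factories such as noderesources.NewFit are wrapped by runtime.FactoryAdapter. A rough, self-contained sketch of that adapter pattern, using simplified hypothetical stand-in types rather than the real scheduler framework API:

package main

import "fmt"

// Hypothetical, simplified stand-ins for the scheduler framework types.
type Plugin interface{ Name() string }

// PluginFactory is the value type the registry expects.
type PluginFactory func(args interface{}) (Plugin, error)

// Features mimics the feature-gate struct that some factories additionally take.
type Features struct{ SomeGateEnabled bool }

type FactoryWithFeatures func(args interface{}, fts Features) (Plugin, error)

// factoryAdapter closes over the feature gates so a feature-aware factory
// satisfies the plain PluginFactory signature.
func factoryAdapter(fts Features, f FactoryWithFeatures) PluginFactory {
	return func(args interface{}) (Plugin, error) { return f(args, fts) }
}

type fitPlugin struct{}

func (fitPlugin) Name() string { return "NodeResourcesFit" }

func newFit(args interface{}, fts Features) (Plugin, error) { return fitPlugin{}, nil }

func main() {
	registry := map[string]PluginFactory{
		"NodeResourcesFit": factoryAdapter(Features{}, newFit),
	}
	p, _ := registry["NodeResourcesFit"](nil)
	fmt.Println(p.Name()) // NodeResourcesFit
}

The closure keeps the registry's value type uniform while still letting individual factories see the feature gates they need.
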
2 changes: 1 addition & 1 deletion pkg/scheduler/generic_scheduler_test.go
@@ -1312,7 +1312,7 @@ func TestZeroRequest(t *testing.T) {
fts := feature.Features{}
pluginRegistrations := []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterScorePlugin(noderesources.FitName, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1),
st.RegisterScorePlugin(noderesources.Name, frameworkruntime.FactoryAdapter(fts, noderesources.NewFit), 1),
st.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), 1),
st.RegisterScorePlugin(selectorspread.Name, selectorspread.New, 1),
st.RegisterPreScorePlugin(selectorspread.Name, selectorspread.New),
6 changes: 3 additions & 3 deletions pkg/scheduler/scheduler_test.go
@@ -858,12 +858,12 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
framework.Unschedulable,
fmt.Sprintf("Insufficient %v", v1.ResourceCPU),
fmt.Sprintf("Insufficient %v", v1.ResourceMemory),
).WithFailedPlugin(noderesources.FitName)
).WithFailedPlugin(noderesources.Name)
}
fns := []st.RegisterPluginFunc{
st.RegisterQueueSortPlugin(queuesort.Name, queuesort.New),
st.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
st.RegisterPluginAsExtensions(noderesources.FitName, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"),
st.RegisterPluginAsExtensions(noderesources.Name, frameworkruntime.FactoryAdapter(feature.Features{}, noderesources.NewFit), "Filter", "PreFilter"),
}
scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, informerFactory, nil, fns...)

@@ -879,7 +879,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
NumAllNodes: len(nodes),
Diagnosis: framework.Diagnosis{
NodeToStatusMap: failedNodeStatues,
UnschedulablePlugins: sets.NewString(noderesources.FitName),
UnschedulablePlugins: sets.NewString(noderesources.Name),
},
}
if len(fmt.Sprint(expectErr)) > 150 {
1 change: 1 addition & 0 deletions pkg/scheduler/testing/wrappers.go
@@ -431,6 +431,7 @@ func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
Image: imageutils.GetPauseImageName(),
Resources: v1.ResourceRequirements{
Requests: res,
Limits: res,
},
})
return p
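
A side note on the one-line wrappers.go change above (not part of the commit text): setting Limits equal to Requests is presumably needed because extended resources such as example.com/gpu, used by the new test below, can only be requested when the limit is set and equal to the request; it also gives the test pods the Guaranteed QoS class. A minimal sketch of the resulting resource block, with a hypothetical helper standing in for the wrapper:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// guaranteedResources is a hypothetical helper (for illustration only) that
// mirrors what the updated PodWrapper.Req does: identical requests and limits.
func guaranteedResources(resMap map[v1.ResourceName]string) v1.ResourceRequirements {
	res := v1.ResourceList{}
	for name, qty := range resMap {
		res[name] = resource.MustParse(qty)
	}
	// Requests == Limits for every resource; applied to all containers this
	// yields the Guaranteed QoS class and satisfies the extended-resource rule.
	return v1.ResourceRequirements{Requests: res, Limits: res}
}

func main() {
	rr := guaranteedResources(map[v1.ResourceName]string{
		v1.ResourceCPU:    "2",
		v1.ResourceMemory: "4G",
		"example.com/gpu": "1",
	})
	fmt.Println(rr.Requests.Cpu(), rr.Limits.Memory()) // 2 4G
}
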
4 changes: 2 additions & 2 deletions test/integration/scheduler/framework_test.go
@@ -1965,10 +1965,10 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
// would fail first and exit the Filter phase.
Enabled: []v1beta3.Plugin{
{Name: filterPluginName},
{Name: noderesources.FitName},
{Name: noderesources.Name},
},
Disabled: []v1beta3.Plugin{
{Name: noderesources.FitName},
{Name: noderesources.Name},
},
},
},
127 changes: 116 additions & 11 deletions test/integration/scheduler/priorities_test.go
@@ -22,9 +22,10 @@ import (
"strings"
"testing"

v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -36,13 +37,18 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
st "k8s.io/kubernetes/pkg/scheduler/testing"
testutils "k8s.io/kubernetes/test/integration/util"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
)

const (
resourceGPU = "example.com/gpu"
)

// This file tests the scheduler priority functions.
func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *testutils.TestContext {
cfg := configtesting.V1beta3ToInternalWithDefaults(t, v1beta3.KubeSchedulerConfiguration{
@@ -70,9 +76,108 @@ func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *tes
return testCtx
}

// TestNodeAffinity verifies that scheduler's node affinity priority function
func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext {
cfg := configtesting.V1beta3ToInternalWithDefaults(t, v1beta3.KubeSchedulerConfiguration{
Profiles: []v1beta3.KubeSchedulerProfile{
{
SchedulerName: pointer.StringPtr(v1.DefaultSchedulerName),
},
{
SchedulerName: pointer.StringPtr("gpu-binpacking-scheduler"),
PluginConfig: []v1beta3.PluginConfig{
{
Name: noderesources.Name,
Args: runtime.RawExtension{Object: &v1beta3.NodeResourcesFitArgs{
ScoringStrategy: &v1beta3.ScoringStrategy{
Type: v1beta3.MostAllocated,
Resources: []v1beta3.ResourceSpec{
{Name: string(v1.ResourceCPU), Weight: 1},
{Name: string(v1.ResourceMemory), Weight: 1},
{Name: resourceGPU, Weight: 2}},
},
}},
},
},
},
},
})
testCtx := testutils.InitTestSchedulerWithOptions(
t,
testutils.InitTestAPIServer(t, strings.ToLower(noderesources.Name), nil),
scheduler.WithProfiles(cfg.Profiles...),
)
testutils.SyncInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
return testCtx
}

// TestNodeResourcesScoring verifies that scheduler's node resources priority function
// works correctly.
func TestNodeResourcesScoring(t *testing.T) {
testCtx := initTestSchedulerForNodeResourcesTest(t)
defer testutils.CleanupTest(t, testCtx)
// Add a few nodes.
_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity(
map[v1.ResourceName]string{
v1.ResourceCPU: "8",
v1.ResourceMemory: "16G",
resourceGPU: "4",
}), 2)
if err != nil {
t.Fatal(err)
}
cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Req(
map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "4G",
resourceGPU: "1",
},
).Obj())
if err != nil {
t.Fatal(err)
}
gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Req(
map[v1.ResourceName]string{
v1.ResourceCPU: "1",
v1.ResourceMemory: "2G",
resourceGPU: "2",
},
).Obj())
if err != nil {
t.Fatal(err)
}
if cpuBoundPod1.Spec.NodeName == "" || gpuBoundPod1.Spec.NodeName == "" {
t.Fatalf("pods should have nodeName assigned, got %q and %q",
cpuBoundPod1.Spec.NodeName, gpuBoundPod1.Spec.NodeName)
}

// Since both pods used the default scheduler, then they should land on two different
// nodes because the default configuration uses LeastAllocated.
if cpuBoundPod1.Spec.NodeName == gpuBoundPod1.Spec.NodeName {
t.Fatalf("pods should have landed on different nodes, both scheduled on %q",
cpuBoundPod1.Spec.NodeName)
}

// The following pod is using the gpu-binpacking-scheduler profile, which gives a higher weight to
// GPU-based binpacking, and so it should land on the node with higher GPU utilization.
cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Req(
map[v1.ResourceName]string{
v1.ResourceCPU: "2",
v1.ResourceMemory: "4G",
resourceGPU: "1",
},
).Obj())
if err != nil {
t.Fatal(err)
}
if cpuBoundPod2.Spec.NodeName != gpuBoundPod1.Spec.NodeName {
t.Errorf("pods should have landed on the same node")
}
}

// TestNodeAffinityScoring verifies that scheduler's node affinity priority function
// works correctly.
func TestNodeAffinity(t *testing.T) {
func TestNodeAffinityScoring(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name)
defer testutils.CleanupTest(t, testCtx)
// Add a few nodes.
@@ -122,9 +227,9 @@ func TestNodeAffinity(t *testing.T) {
}
}

// TestPodAffinity verifies that scheduler's pod affinity priority function
// TestPodAffinityScoring verifies that scheduler's pod affinity priority function
// works correctly.
func TestPodAffinity(t *testing.T) {
func TestPodAffinityScoring(t *testing.T) {
labelKey := "service"
labelValue := "S1"
topologyKey := "node-topologykey"
@@ -236,9 +341,9 @@ func TestPodAffinity(t *testing.T) {
}
}

// TestImageLocality verifies that the scheduler's image locality priority function
// TestImageLocalityScoring verifies that the scheduler's image locality priority function
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocality(t *testing.T) {
func TestImageLocalityScoring(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, imagelocality.Name)
defer testutils.CleanupTest(t, testCtx)

@@ -295,8 +400,8 @@ func makeContainersWithImages(images []string) []v1.Container {
return containers
}

// TestPodTopologySpreadScore verifies that the PodTopologySpread Score plugin works.
func TestPodTopologySpreadScore(t *testing.T) {
// TestPodTopologySpreadScoring verifies that the PodTopologySpread Score plugin works.
func TestPodTopologySpreadScoring(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet
@@ -403,10 +508,10 @@ func TestPodTopologySpreadScore(t *testing.T) {
}
}

// TestDefaultPodTopologySpreadScore verifies that the PodTopologySpread Score plugin
// TestDefaultPodTopologySpreadScoring verifies that the PodTopologySpread Score plugin
// with the system default spreading spreads Pods belonging to a Service.
// The setup has 300 nodes over 3 zones.
func TestDefaultPodTopologySpreadScore(t *testing.T) {
func TestDefaultPodTopologySpreadScoring(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
t.Cleanup(func() {
testutils.CleanupTest(t, testCtx)
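
For illustration (not part of the commit): the placement asserted at the end of TestNodeResourcesScoring can be recomputed by hand. cpubound2 sets schedulerName to gpu-binpacking-scheduler, so the second profile's MostAllocated strategy with weights cpu=1, memory=1, gpu=2 applies. Assuming MostAllocated scores a node roughly as the weighted average of requested/allocatable after the pod is added (an approximation, not the plugin's actual scorer), the node already running gpubound1 wins, which matches the test's expectation:

package main

import "fmt"

// mostAllocatedScore approximates the NodeResourcesFit MostAllocated strategy
// as a weighted average of requested/allocatable per resource, scaled to 0-100.
// Illustrative recomputation only, not the plugin's actual code.
func mostAllocatedScore(requested, allocatable, weights map[string]float64) float64 {
	var sum, weightSum float64
	for name, w := range weights {
		sum += w * requested[name] / allocatable[name]
		weightSum += w
	}
	return 100 * sum / weightSum
}

func main() {
	allocatable := map[string]float64{"cpu": 8, "memory": 16, "gpu": 4}
	weights := map[string]float64{"cpu": 1, "memory": 1, "gpu": 2} // from the test's ScoringStrategy

	// Usage if cpubound2 (2 CPU, 4G, 1 GPU) joins the node already running cpubound1 (2 CPU, 4G, 1 GPU).
	withCPUPod := map[string]float64{"cpu": 2 + 2, "memory": 4 + 4, "gpu": 1 + 1}
	// Usage if it joins the node already running gpubound1 (1 CPU, 2G, 2 GPU).
	withGPUPod := map[string]float64{"cpu": 1 + 2, "memory": 2 + 4, "gpu": 2 + 1}

	fmt.Printf("node with cpubound1: %.1f\n", mostAllocatedScore(withCPUPod, allocatable, weights)) // 50.0
	fmt.Printf("node with gpubound1: %.1f\n", mostAllocatedScore(withGPUPod, allocatable, weights)) // 56.2
}

Under these assumptions the GPU-heavy node scores about 56 versus 50, so the binpacking profile co-locates cpubound2 with gpubound1, while the default LeastAllocated profile had spread the first two pods apart.
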
