Add integration test for VolumeRestriction in requeueing scenarios
Signed-off-by: utam0k <k0ma@utam0k.jp>
utam0k committed Oct 6, 2024
1 parent 055f0fa commit 024deae
Showing 1 changed file with 65 additions and 0 deletions: test/integration/scheduler/queue_test.go
@@ -1231,6 +1231,71 @@ func TestCoreResourceEnqueue(t *testing.T) {
			wantRequeuedPods: sets.Set[string]{},
			enableSchedulingQueueHint: []bool{true},
		},
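		// pod1 references pvc1, which does not exist yet, so the VolumeRestriction
		// plugin rejects it; creating pvc1 afterwards should requeue pod1.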
		{
			name: "Pod rejected by the VolumeRestriction plugin is requeued when the PVC bound to the pod is added",
			initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
			initialPVs: []*v1.PersistentVolume{
				st.MakePersistentVolume().
					Name("pv1").
					AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}).
					Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
					HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
					Obj(),
			},
			pods: []*v1.Pod{
				st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
			},
			triggerFn: func(testCtx *testutils.TestContext) error {
				pvc1 := st.MakePersistentVolumeClaim().
					Name("pvc1").
					Annotation(volume.AnnBindCompleted, "true").
					VolumeName("pv1").
					AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
					Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
					Obj()
				if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc1, metav1.CreateOptions{}); err != nil {
					return fmt.Errorf("failed to add pvc1: %w", err)
				}
				return nil
			},
			wantRequeuedPods: sets.New("pod1"),
			enableSchedulingQueueHint: []bool{true},
		},
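		// pvc1 is ReadWriteOncePod and is already used by the scheduled pod1, so the
		// VolumeRestriction plugin rejects pod2; deleting pod1 should requeue pod2.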
		{
			name: "Pod rejected by the VolumeRestriction plugin is requeued when the pod using the same ReadWriteOncePod PVC is deleted",
			initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
			initialPVs: []*v1.PersistentVolume{
				st.MakePersistentVolume().
					Name("pv1").
					AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}).
					Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
					HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
					Obj(),
			},
			initialPVCs: []*v1.PersistentVolumeClaim{
				st.MakePersistentVolumeClaim().
					Name("pvc1").
					Annotation(volume.AnnBindCompleted, "true").
					VolumeName("pv1").
					AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
					Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
					Obj(),
			},
			initialPods: []*v1.Pod{
				st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(),
			},
			pods: []*v1.Pod{
				st.MakePod().Name("pod2").Container("image").PVC("pvc1").Obj(),
			},
			triggerFn: func(testCtx *testutils.TestContext) error {
				if err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Delete(testCtx.Ctx, "pod1", metav1.DeleteOptions{GracePeriodSeconds: new(int64)}); err != nil {
					return fmt.Errorf("failed to delete pod1: %w", err)
				}
				return nil
			},
			wantRequeuedPods: sets.New("pod2"),
			enableSchedulingQueueHint: []bool{true},
		},
	}

	for _, tt := range tests {
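
Both new cases run with enableSchedulingQueueHint set to true, i.e. with the scheduler's queueing-hint machinery: a pod that a plugin rejected is only requeued when an incoming cluster event (a PersistentVolumeClaim Add in the first case, a Pod Delete in the second) is judged relevant to that specific pod. Below is a minimal, self-contained sketch of that relevance check for the PVC-added case; the QueueingHint type, its Queue/QueueSkip values, and isSchedulableAfterPVCAdded are local stand-ins for illustration, not the actual VolumeRestriction plugin code.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// QueueingHint is a local stand-in for the scheduler's hint result type.
type QueueingHint int

const (
	// QueueSkip: the event cannot make the pod schedulable; leave it in the unschedulable pool.
	QueueSkip QueueingHint = iota
	// Queue: the event may make the pod schedulable; requeue it for another scheduling attempt.
	Queue
)

// isSchedulableAfterPVCAdded returns Queue only if the newly added PVC is one the
// rejected pod actually references; unrelated PVC additions are ignored.
func isSchedulableAfterPVCAdded(pod *v1.Pod, addedPVC *v1.PersistentVolumeClaim) QueueingHint {
	for _, vol := range pod.Spec.Volumes {
		if vol.PersistentVolumeClaim != nil && vol.PersistentVolumeClaim.ClaimName == addedPVC.Name {
			return Queue
		}
	}
	return QueueSkip
}

func main() {
	// Mirrors pod1 from the first test case above: it mounts a volume backed by pvc1.
	pod := &v1.Pod{Spec: v1.PodSpec{Volumes: []v1.Volume{{
		Name:         "data",
		VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "pvc1"}},
	}}}}
	pvc := &v1.PersistentVolumeClaim{}
	pvc.Name = "pvc1"
	fmt.Println(isSchedulableAfterPVCAdded(pod, pvc)) // prints 1 (Queue): the pod should be requeued
}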
