
Merge pull request kubernetes#93332 from hasheddan/wait-for-owned
Wait for resources owned by pod to be cleaned up in sig-storage tests
k8s-ci-robot authored Jul 23, 2020
2 parents 09e2230 + efe3747 commit 04ecdb9
Showing 2 changed files with 36 additions and 4 deletions.
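
The change leans on Kubernetes garbage collection: objects created on behalf of a pod (for example the PVC behind a generic ephemeral inline volume) can carry an ownerReference to the pod, and deleting the pod with foreground propagation keeps the pod object around until those blocking dependents are gone. Below is a minimal standalone sketch of that deletion pattern, not code from this commit; the kubeconfig handling, namespace, and pod name are placeholder assumptions.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// deletePodForeground issues a delete with foreground propagation. The API
// call returns once the deletion is accepted; the pod object itself is only
// removed after all dependents with blockOwnerDeletion have been deleted, so
// callers still poll until the pod is NotFound (as the new test helper below does).
func deletePodForeground(ctx context.Context, c kubernetes.Interface, namespace, name string) error {
	policy := metav1.DeletePropagationForeground
	return c.CoreV1().Pods(namespace).Delete(ctx, name, metav1.DeleteOptions{
		PropagationPolicy: &policy,
	})
}

func main() {
	// Placeholder client construction from the default kubeconfig.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Placeholder namespace and pod name.
	if err := deletePodForeground(context.TODO(), client, "default", "inline-volume-tester"); err != nil {
		fmt.Println("delete failed:", err)
	}
}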
8 changes: 4 additions & 4 deletions test/e2e/storage/testsuites/ephemeral.go
@@ -207,7 +207,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
 			storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
 		}
 
-		defer StopPod(f.ClientSet, pod2)
+		defer StopPodAndDependents(f.ClientSet, pod2)
 		return nil
 	}
 
@@ -302,7 +302,7 @@ func (t EphemeralTest) TestEphemeral() {
 	pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
 	defer func() {
 		// pod might be nil now.
-		StopPod(client, pod)
+		StopPodAndDependents(client, pod)
 	}()
 	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
 	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -315,7 +315,7 @@ func (t EphemeralTest) TestEphemeral() {
 		runningPodData = t.RunningPodCheck(pod)
 	}
 
-	StopPod(client, pod)
+	StopPodAndDependents(client, pod)
 	pod = nil // Don't stop twice.
 
 	// There should be no dangling PVCs in the namespace now. There might be for
@@ -446,7 +446,7 @@ func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSourc
 	switch {
 	case err == nil:
 		// Pod was created, feature supported.
-		StopPod(c, pod)
+		StopPodAndDependents(c, pod)
 		return true, nil
 	case apierrors.IsInvalid(err):
 		// "Invalid" because it uses a feature that isn't supported.
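The ephemeral test above goes on to assert that "There should be no dangling PVCs in the namespace now." A hedged sketch of what such a check can look like with client-go, assuming a clientset c and a test namespace ns; this helper is illustrative and not part of the commit.

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// expectNoDanglingPVCs lists PersistentVolumeClaims in the namespace and
// returns an error if any are left after the owning pod has been deleted.
func expectNoDanglingPVCs(ctx context.Context, c kubernetes.Interface, ns string) error {
	pvcs, err := c.CoreV1().PersistentVolumeClaims(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("listing PVCs in %q: %w", ns, err)
	}
	if n := len(pvcs.Items); n != 0 {
		return fmt.Errorf("expected no PVCs in %q, found %d (first: %s)", ns, n, pvcs.Items[0].Name)
	}
	return nil
}
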
32 changes: 32 additions & 0 deletions test/e2e/storage/testsuites/provisioning.go
@@ -712,6 +712,38 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
 	e2epod.DeletePodWithWait(c, pod)
 }
 
+// StopPodAndDependents first tries to log the output of the pod's container,
+// then deletes the pod and waits for that to succeed. Also waits for all owned
+// resources to be deleted.
+func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
+	if pod == nil {
+		return
+	}
+	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
+	if err != nil {
+		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
+	} else {
+		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
+	}
+	framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
+	deletionPolicy := metav1.DeletePropagationForeground
+	err = c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name,
+		metav1.DeleteOptions{
+			// If the pod is the owner of some resources (like ephemeral inline volumes),
+			// then we want to be sure that those are also gone before we return.
+			// Blocking pod deletion via metav1.DeletePropagationForeground achieves that.
+			PropagationPolicy: &deletionPolicy,
+		})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return // assume pod was already deleted
+		}
+		framework.Logf("pod Delete API error: %v", err)
+	}
+	framework.Logf("Wait up to %v for pod %q to be fully deleted", e2epod.PodDeleteTimeout, pod.Name)
+	e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, e2epod.PodDeleteTimeout)
+}
+
 func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
 	for _, claim := range pvcs {
 		// Get new copy of the claim
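Foreground propagation only blocks on dependents that actually reference the pod as an owner with blockOwnerDeletion set, which is how the PVCs behind generic ephemeral inline volumes are wired up. An illustrative sketch, not taken from this commit, of the ownerReference shape that makes an object a blocking dependent of a pod:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// blockingOwnerRef builds an ownerReference pointing at the pod. Any object
// carrying this reference is garbage-collected along with the pod, and
// foreground deletion of the pod waits for it because BlockOwnerDeletion is true.
func blockingOwnerRef(pod *v1.Pod) metav1.OwnerReference {
	isTrue := true
	return metav1.OwnerReference{
		APIVersion:         "v1",
		Kind:               "Pod",
		Name:               pod.Name,
		UID:                pod.UID,
		Controller:         &isTrue,
		BlockOwnerDeletion: &isTrue,
	}
}

An object whose metadata.ownerReferences contains such an entry is removed by the garbage collector once the pod goes away; with foreground propagation the pod object itself is not deleted until that has happened, which is what StopPodAndDependents then waits for via WaitForPodNotFoundInNamespace.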
