diff --git a/tests/container_disk_test.go b/tests/container_disk_test.go
index fa0c5ee62c7c..853f083bb604 100644
--- a/tests/container_disk_test.go
+++ b/tests/container_disk_test.go
@@ -187,8 +187,9 @@ var _ = Describe("[rfe_id:588][crit:medium][vendor:cnv-qe@redhat.com][level:comp
     Describe("[rfe_id:273][crit:medium][vendor:cnv-qe@redhat.com][level:component]Starting with virtio-win", func() {
         Context("with virtio-win as secondary disk", func() {
             It("[test_id:1467]should boot and have the virtio as sata CDROM", func() {
-                vmi := libvmi.NewAlpine()
-                tests.AddEphemeralCdrom(vmi, "disk4", v1.DiskBusSATA, cd.ContainerDiskFor(cd.ContainerDiskVirtio))
+                vmi := libvmi.NewAlpine(
+                    libvmi.WithEphemeralCDRom("disk4", v1.DiskBusSATA, cd.ContainerDiskFor(cd.ContainerDiskVirtio)),
+                )
                 vmi = tests.RunVMIAndExpectLaunch(vmi, 60)

                 By("Checking whether the second disk really contains virtio drivers")
diff --git a/tests/dryrun_test.go b/tests/dryrun_test.go
index 660b8d5a3eca..f1662a94265c 100644
--- a/tests/dryrun_test.go
+++ b/tests/dryrun_test.go
@@ -26,6 +26,7 @@ import (
     "kubevirt.io/kubevirt/tests/decorators"
     "kubevirt.io/kubevirt/tests/framework/kubevirt"
+    "kubevirt.io/kubevirt/tests/libmigration"
     "kubevirt.io/kubevirt/tests/libvmi"

     . "github.com/onsi/ginkgo/v2"
@@ -233,7 +234,7 @@ var _ = Describe("[sig-compute]Dry-Run requests", decorators.SigCompute, func()
         vmi := tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskAlpine))
         vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Create(context.Background(), vmi)
         Expect(err).ToNot(HaveOccurred())
-        vmim = tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+        vmim = libmigration.New(vmi.Name, vmi.Namespace)
     })

     It("[test_id:7635]create a migration", func() {
diff --git a/tests/hotplug/cpu.go b/tests/hotplug/cpu.go
index 0e9ece6ef93d..6dcf42082df0 100644
--- a/tests/hotplug/cpu.go
+++ b/tests/hotplug/cpu.go
@@ -215,7 +215,7 @@ var _ = Describe("[sig-compute][Serial]CPU Hotplug", decorators.SigCompute, deco
             }))

             By("starting the migration")
-            migration := tests.NewRandomMigration(vm.Name, vm.Namespace)
+            migration := libmigration.New(vm.Name, vm.Namespace)
             migration, err = virtClient.VirtualMachineInstanceMigration(vm.Namespace).Create(migration, &metav1.CreateOptions{})
             Expect(err).ToNot(HaveOccurred())
diff --git a/tests/hyperv_test.go b/tests/hyperv_test.go
index c0404ed0e7f8..2a7057a37070 100644
--- a/tests/hyperv_test.go
+++ b/tests/hyperv_test.go
@@ -88,7 +88,7 @@ var _ = Describe("[Serial][sig-compute] Hyper-V enlightenments", Serial, decorat
             reEnlightenmentVMI = libwait.WaitForSuccessfulVMIStart(reEnlightenmentVMI)

             By("Migrating the VM")
-            migration := tests.NewRandomMigration(reEnlightenmentVMI.Name, reEnlightenmentVMI.Namespace)
+            migration := libmigration.New(reEnlightenmentVMI.Name, reEnlightenmentVMI.Namespace)
             migrationUID := libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             By("Checking VMI, confirm migration state")
diff --git a/tests/kubectl_test.go b/tests/kubectl_test.go
index 33371115a964..0b7dff16e4cf 100644
--- a/tests/kubectl_test.go
+++ b/tests/kubectl_test.go
@@ -182,7 +182,7 @@ var _ = Describe("[sig-compute]oc/kubectl integration", decorators.SigCompute, f
             Expect(console.LoginToCirros(vmi)).To(Succeed())

             By("creating the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             var migrationCreated *v1.VirtualMachineInstanceMigration

             By("starting migration")
diff --git a/tests/libmigration/migration.go b/tests/libmigration/migration.go
index 063e2250e80d..49e08e29d825 100644
--- a/tests/libmigration/migration.go
+++ b/tests/libmigration/migration.go
@@ -37,6 +37,22 @@ import (

 const MigrationWaitTime = 240

+func New(vmiName string, namespace string) *v1.VirtualMachineInstanceMigration {
+    return &v1.VirtualMachineInstanceMigration{
+        TypeMeta: metav1.TypeMeta{
+            APIVersion: v1.GroupVersion.String(),
+            Kind:       "VirtualMachineInstanceMigration",
+        },
+        ObjectMeta: metav1.ObjectMeta{
+            GenerateName: "test-migration-",
+            Namespace:    namespace,
+        },
+        Spec: v1.VirtualMachineInstanceMigrationSpec{
+            VMIName: vmiName,
+        },
+    }
+}
+
 func ExpectMigrationToSucceed(virtClient kubecli.KubevirtClient, migration *v1.VirtualMachineInstanceMigration, timeout int) *v1.VirtualMachineInstanceMigration {
     return ExpectMigrationToSucceedWithOffset(2, virtClient, migration, timeout)
 }
diff --git a/tests/libvmi/storage.go b/tests/libvmi/storage.go
index 04e1b24c3240..49dbca25a5ea 100644
--- a/tests/libvmi/storage.go
+++ b/tests/libvmi/storage.go
@@ -80,6 +80,14 @@ func WithCDRom(cdRomName string, bus v1.DiskBus, claimName string) Option {
     }
 }

+// WithEphemeralCDRom specifies an ephemeral CDRom drive backed by a container disk image.
+func WithEphemeralCDRom(cdRomName string, bus v1.DiskBus, image string) Option {
+    return func(vmi *v1.VirtualMachineInstance) {
+        addDisk(vmi, newCDRom(cdRomName, bus))
+        addVolume(vmi, newContainerVolume(cdRomName, image))
+    }
+}
+
 // WithFilesystemPVC specifies a filesystem backed by a PVC to be used.
 func WithFilesystemPVC(claimName string) Option {
     return func(vmi *v1.VirtualMachineInstance) {
diff --git a/tests/migration/eviction_strategy.go b/tests/migration/eviction_strategy.go
index ead7361e848e..81d1c21e5c69 100644
--- a/tests/migration/eviction_strategy.go
+++ b/tests/migration/eviction_strategy.go
@@ -33,6 +33,7 @@ import (
     "kubevirt.io/kubevirt/tests/framework/cleanup"
     "kubevirt.io/kubevirt/tests/framework/kubevirt"
     "kubevirt.io/kubevirt/tests/framework/matcher"
+    "kubevirt.io/kubevirt/tests/libmigration"
     "kubevirt.io/kubevirt/tests/libnet"
     "kubevirt.io/kubevirt/tests/libnode"
     "kubevirt.io/kubevirt/tests/libvmi"
@@ -242,7 +243,7 @@ var _ = SIGMigrationDescribe("Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration, err := virtClient.VirtualMachineInstanceMigration(vmi.Namespace).Create(migration, &metav1.CreateOptions{})
             Expect(err).ToNot(HaveOccurred())
diff --git a/tests/migration/migration.go b/tests/migration/migration.go
index d4fac0fd2b0d..3b8bced18831 100644
--- a/tests/migration/migration.go
+++ b/tests/migration/migration.go
@@ -255,7 +255,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             assertConnectivityToService("Asserting connectivity through service before migration")

             By("Executing a migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration)
@@ -265,11 +265,23 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
     })

     Describe("Starting a VirtualMachineInstance ", func() {
-        guestAgentMigrationTestFunc := func(pvName string, memoryRequestSize resource.Quantity, migrationPolicy *migrationsv1.MigrationPolicy) {
+        guestAgentMigrationTestFunc := func(pvName string, memoryRequestSize string, migrationPolicy *migrationsv1.MigrationPolicy) {
             By("Creating the VMI")
-            vmi := tests.NewRandomVMIWithPVC(pvName)
-            vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = memoryRequestSize
-            vmi.Spec.Domain.Devices.Rng = &v1.Rng{}
+
+            // add userdata for guest agent and service account mount
+            mountSvcAccCommands := fmt.Sprintf(`#!/bin/bash
+                mkdir /mnt/servacc
+                mount /dev/$(lsblk --nodeps -no name,serial | grep %s | cut -f1 -d' ') /mnt/servacc
+            `, secretDiskSerial)
+            vmi := libvmi.New(
+                libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
+                libvmi.WithNetwork(v1.DefaultPodNetwork()),
+                libvmi.WithPersistentVolumeClaim("disk0", pvName),
+                libvmi.WithResourceMemory(memoryRequestSize),
+                libvmi.WithRng(),
+                libvmi.WithCloudInitNoCloudEncodedUserData(mountSvcAccCommands),
+                libvmi.WithServiceAccountDisk("default"),
+            )

             mode := v1.MigrationPreCopy
             if migrationPolicy != nil && migrationPolicy.Spec.AllowPostCopy != nil && *migrationPolicy.Spec.AllowPostCopy {
@@ -281,14 +293,6 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
                 vmi.Namespace = testsuite.NamespacePrivileged
             }

-            // add userdata for guest agent and service account mount
-            mountSvcAccCommands := fmt.Sprintf(`#!/bin/bash
-                mkdir /mnt/servacc
-                mount /dev/$(lsblk --nodeps -no name,serial | grep %s | cut -f1 -d' ') /mnt/servacc
-            `, secretDiskSerial)
-            tests.AddUserData(vmi, "cloud-init", mountSvcAccCommands)
-
-            tests.AddServiceAccountDisk(vmi, "default")
             disks := vmi.Spec.Domain.Devices.Disks
             disks[len(disks)-1].Serial = secretDiskSerial

@@ -311,7 +315,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration for iteration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
             By("Checking VMI, confirm migration state")
             libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration)
@@ -351,7 +355,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(vmi).To(HaveConditionFalse(v1.VirtualMachineInstanceIsMigratable))

             // execute a migration, wait for finalized state
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)

             By("Starting a Migration")
             migration, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration, &metav1.CreateOptions{})
@@ -373,7 +377,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(console.LoginToAlpine(vmi)).To(Succeed())

             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -420,7 +424,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(console.LoginToAlpine(vmi)).To(Succeed())

             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -439,7 +443,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(console.LoginToAlpine(vmi)).To(Succeed())

             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -456,10 +460,11 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
         It("[test_id:6970]should migrate vmi with cdroms on various bus types", func() {
             vmi := libvmi.NewAlpineWithTestTooling(
-                libvmi.WithMasqueradeNetworking()...,
+                append(libvmi.WithMasqueradeNetworking(),
+                    libvmi.WithEphemeralCDRom("cdrom-0", v1.DiskBusSATA, cd.ContainerDiskFor(cd.ContainerDiskAlpine)),
+                    libvmi.WithEphemeralCDRom("cdrom-1", v1.DiskBusSCSI, cd.ContainerDiskFor(cd.ContainerDiskAlpine)),
+                )...,
             )
-            tests.AddEphemeralCdrom(vmi, "cdrom-0", v1.DiskBusSATA, cd.ContainerDiskFor(cd.ContainerDiskAlpine))
-            tests.AddEphemeralCdrom(vmi, "cdrom-1", v1.DiskBusSCSI, cd.ContainerDiskFor(cd.ContainerDiskAlpine))

             By("Starting the VirtualMachineInstance")
             vmi = tests.RunVMIAndExpectLaunch(vmi, 240)
@@ -469,7 +474,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -491,7 +496,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -500,18 +505,43 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
         It("should migrate vmi and use Live Migration method with read-only disks", func() {
             By("Defining a VMI with PVC disk and read-only CDRoms")
-            vmi, _ := tests.NewRandomVirtualMachineInstanceWithBlockDisk(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine), testsuite.GetTestNamespace(nil), k8sv1.ReadWriteMany)
-            vmi.Spec.Hostname = string(cd.ContainerDiskAlpine)
-
-            tests.AddEphemeralCdrom(vmi, "cdrom-0", v1.DiskBusSATA, cd.ContainerDiskFor(cd.ContainerDiskAlpine))
-            tests.AddEphemeralCdrom(vmi, "cdrom-1", v1.DiskBusSCSI, cd.ContainerDiskFor(cd.ContainerDiskAlpine))
+            if !libstorage.HasCDI() {
+                Skip("Skip DataVolume tests when CDI is not present")
+            }
+            sc, exists := libstorage.GetRWXBlockStorageClass()
+            if !exists {
+                Skip("Skip test when Block storage is not present")
+            }
+            dv := libdv.NewDataVolume(
+                libdv.WithRegistryURLSourceAndPullMethod(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine), cdiv1.RegistryPullNode),
+                libdv.WithPVC(
+                    libdv.PVCWithStorageClass(sc),
+                    libdv.PVCWithVolumeSize(cd.CirrosVolumeSize),
+                    libdv.PVCWithAccessMode(k8sv1.ReadWriteMany),
+                    libdv.PVCWithVolumeMode(k8sv1.PersistentVolumeBlock),
+                ),
+            )
+            dv, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(dv)).Create(context.Background(), dv, metav1.CreateOptions{})
+            Expect(err).ToNot(HaveOccurred())
+            libstorage.EventuallyDV(dv, 240, Or(HaveSucceeded(), BeInPhase(cdiv1.WaitForFirstConsumer), BeInPhase(cdiv1.PendingPopulation)))
+            vmi := libvmi.New(
+                libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
+                libvmi.WithNetwork(v1.DefaultPodNetwork()),
+                libvmi.WithDataVolume("disk0", dv.Name),
+                libvmi.WithResourceMemory("1Gi"),
+                libvmi.WithEphemeralCDRom("cdrom-0", v1.DiskBusSATA, cd.ContainerDiskFor(cd.ContainerDiskAlpine)),
+                libvmi.WithEphemeralCDRom("cdrom-1", v1.DiskBusSCSI, cd.ContainerDiskFor(cd.ContainerDiskAlpine)),
+            )
+            vmi.Spec.Hostname = string(cd.ContainerDiskAlpine)

             By("Starting the VirtualMachineInstance")
-            vmi = tests.RunVMIAndExpectLaunch(vmi, 240)
+            vmi, err = virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Create(context.Background(), vmi)
+            Expect(err).ToNot(HaveOccurred())
+            libwait.WaitForSuccessfulVMIStart(vmi, libwait.WithTimeout(240))

             // execute a migration, wait for finalized state
             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -535,7 +565,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(console.LoginToFedora(vmi)).To(Succeed())

             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration)
@@ -593,7 +623,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(timerFrequency).ToNot(BeEmpty())

             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration)
@@ -626,7 +656,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -660,7 +690,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -683,7 +713,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             for i := 0; i < num; i++ {
                 // execute a migration, wait for finalized state
                 By(fmt.Sprintf("Starting the Migration for iteration %d", i))
-                migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+                migration := libmigration.New(vmi.Name, vmi.Namespace)
                 migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

                 // check VMI, confirm migration state
@@ -746,7 +776,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By(fmt.Sprintf("Starting the Migration"))
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -766,7 +796,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By(fmt.Sprintf("Starting the Migration"))
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -800,7 +830,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(isPausedb).To(BeTrue(), "The VMI should be paused before migration, but it is not.")

             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -842,7 +872,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration that is expected to fail
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration.Annotations = map[string]string{v1.MigrationUnschedulablePodTimeoutSecondsAnnotation: "130"}

             var err error
@@ -897,7 +927,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration that is expected to fail
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration.Annotations = map[string]string{v1.MigrationPendingPodTimeoutSecondsAnnotation: "130"}

             // Add a fake container image to the target pod to force an image pull failure which
@@ -969,7 +999,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -999,7 +1029,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -1062,7 +1092,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(vmi).Should(matcher.HaveConditionFalse(v1.VirtualMachineInstanceIsMigratable))

             // execute a migration, wait for finalized state
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)

             By("Starting a Migration")
             migration, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration, &metav1.CreateOptions{})
@@ -1079,7 +1109,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(console.LoginToAlpine(vmi)).To(Succeed())

             By("Starting a Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -1103,7 +1133,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             runStressTest(vmi, stressDefaultVMSize, 60)

             By("Starting a first migration")
-            migration1 := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration1 := libmigration.New(vmi.Name, vmi.Namespace)
             migration1, err = virtClient.VirtualMachineInstanceMigration(migration1.Namespace).Create(migration1, &metav1.CreateOptions{})
             Expect(err).ToNot(HaveOccurred())
@@ -1115,7 +1145,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
                 go func(n int) {
                     defer GinkgoRecover()
                     defer wg.Done()
-                    migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+                    migration := libmigration.New(vmi.Name, vmi.Namespace)
                     _, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration, &metav1.CreateOptions{})
                     Expect(err).To(HaveOccurred(), fmt.Sprintf("Extra migration %d should have failed to create", n))
                     Expect(err.Error()).To(ContainSubstring(`admission webhook "migration-create-validator.kubevirt.io" denied the request: in-flight migration detected.`))
@@ -1141,7 +1171,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(console.LoginToAlpine(vmi)).To(Succeed())

             By("Starting a Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -1156,7 +1186,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(console.LoginToAlpine(vmi)).To(Succeed())

             // execute a migration, wait for finalized state
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, 180)

             // check VMI, confirm migration state
@@ -1179,7 +1209,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

                 // execute a migration, wait for finalized state
                 By("Starting the Migration for iteration")
-                migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+                migration := libmigration.New(vmi.Name, vmi.Namespace)
                 migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

                 // check VMI, confirm migration state
@@ -1222,25 +1252,29 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             It("[test_id:2653] should be migrated successfully, using guest agent on VM with default migration configuration", func() {
                 By("Creating the DV")
                 createDV(testsuite.NamespacePrivileged)
-                guestAgentMigrationTestFunc(dv.Name, resource.MustParse(fedoraVMSize), nil)
+                guestAgentMigrationTestFunc(dv.Name, fedoraVMSize, nil)
             })

             It("[test_id:6975] should have guest agent functional after migration", func() {
                 By("Creating the DV")
                 createDV(testsuite.GetTestNamespace(nil))
                 By("Creating the VMI")
-                vmi = tests.NewRandomVMIWithPVC(dv.Name)
-                vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse(fedoraVMSize)
-                vmi.Spec.Domain.Devices.Rng = &v1.Rng{}
+                vmi = libvmi.New(
+                    libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
+                    libvmi.WithNetwork(v1.DefaultPodNetwork()),
+                    libvmi.WithPersistentVolumeClaim("disk0", dv.Name),
+                    libvmi.WithResourceMemory(fedoraVMSize),
+                    libvmi.WithRng(),
+                    libvmi.WithCloudInitNoCloudEncodedUserData("#!/bin/bash\n echo hello\n"),
+                )
tests.AddUserData(vmi, "cloud-init", "#!/bin/bash\n echo hello\n") vmi = tests.RunVMIAndExpectLaunchIgnoreWarnings(vmi, 180) By("Checking guest agent") Eventually(matcher.ThisVMI(vmi), 12*time.Minute, 2*time.Second).Should(matcher.HaveConditionTrue(v1.VirtualMachineInstanceAgentConnected)) By("Starting the Migration for iteration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) _ = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) By("Agent stays connected") @@ -1318,7 +1352,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { tests.DisableFeatureGate(virtconfig.Root) By("Starting new migration and waiting for it to succeed") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, 340) By("Verifying Second Migration Succeeeds") @@ -1352,7 +1386,12 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { Entry("[test_id:8612] with PVC", func() *v1.VirtualMachineInstance { dv = createDataVolumePVCAndChangeDiskImgPermissions(testsuite.NamespacePrivileged, size) // Use the Underlying PVC - return tests.NewRandomVMIWithPVC(dv.Name) + return libvmi.New( + libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()), + libvmi.WithNetwork(v1.DefaultPodNetwork()), + libvmi.WithPersistentVolumeClaim("disk0", dv.Name), + libvmi.WithResourceMemory("128Mi"), + ) }, console.LoginToAlpine), ) }) @@ -1398,7 +1437,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { tests.EnableFeatureGate(virtconfig.Root) By("Starting new migration and waiting for it to succeed") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, 340) By("Verifying Second Migration Succeeeds") @@ -1432,7 +1471,13 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { Entry("with PVC", func() *v1.VirtualMachineInstance { dv = createDataVolumePVCAndChangeDiskImgPermissions(testsuite.NamespacePrivileged, size) // Use the underlying PVC - return tests.NewRandomVMIWithPVC(dv.Name) + return libvmi.New( + libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()), + libvmi.WithNetwork(v1.DefaultPodNetwork()), + libvmi.WithPersistentVolumeClaim("disk0", dv.Name), + libvmi.WithResourceMemory("128Mi"), + ) + }, console.LoginToAlpine), ) }) @@ -1454,7 +1499,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { Expect(console.LoginToAlpine(vmi)).To(Succeed()) By("starting the migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) // check VMI, confirm migration state @@ -1482,7 +1527,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { // execute a migration, wait for finalized state By("Starting the Migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration, &metav1.CreateOptions{}) By("Waiting for the proxy connection details to appear") @@ -1561,7 +1606,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { // execute a 
             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration, &metav1.CreateOptions{})
             Expect(err).ToNot(HaveOccurred())
@@ -1666,7 +1711,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             })

             It("[test_id:5004] should be migrated successfully, using guest agent on VM with postcopy", func() {
-                guestAgentMigrationTestFunc(dv.Name, resource.MustParse("1Gi"), migrationPolicy)
+                guestAgentMigrationTestFunc(dv.Name, "1Gi", migrationPolicy)
             })
         })
@@ -1718,7 +1763,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, 150)

             // check VMI, confirm migration state
@@ -1783,7 +1828,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migrationUID := libmigration.RunMigrationAndExpectFailure(migration, 180)

             // check VMI, confirm migration state
@@ -1830,7 +1875,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migrationUID := libmigration.RunMigrationAndExpectFailure(migration, 180)

             // check VMI, confirm migration state
@@ -1870,7 +1915,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             }, 120*time.Second, 1*time.Second).Should(Succeed(), "Virt handler should come online")

             By("Starting new migration and waiting for it to succeed")
-            migration = tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration = libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, 340)

             By("Verifying Second Migration Succeeds")
@@ -1890,7 +1935,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             for i := 0; i < 10; i++ {
                 // execute a migration, wait for finalized state
                 By("Starting the Migration")
-                migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+                migration := libmigration.New(vmi.Name, vmi.Namespace)
                 migration.Name = fmt.Sprintf("%s-iter-%d", vmi.Name, i)
                 migrationUID := libmigration.RunMigrationAndExpectFailure(migration, 180)
@@ -1929,7 +1974,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migrationUID := libmigration.RunMigrationAndExpectFailure(migration, 180)

             // check VMI, confirm migration state
@@ -1962,7 +2007,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
&metav1.CreateOptions{}) Expect(err).ToNot(HaveOccurred()) @@ -2007,7 +2052,6 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { }) It("Migration should generate empty isos of the right size on the target", func() { By("Creating a VMI with cloud-init and config maps") - vmi := tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskAlpine)) configMapName := "configmap-" + rand.String(5) secretName := "secret-" + rand.String(5) downwardAPIName := "downwardapi-" + rand.String(5) @@ -2019,16 +2063,21 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { "user": "admin", "password": "community", } - tests.CreateConfigMap(configMapName, vmi.Namespace, config_data) - tests.CreateSecret(secretName, vmi.Namespace, secret_data) - tests.AddConfigMapDisk(vmi, configMapName, configMapName) - tests.AddSecretDisk(vmi, secretName, secretName) - tests.AddServiceAccountDisk(vmi, "default") + + tests.CreateConfigMap(configMapName, testsuite.GetTestNamespace(nil), config_data) + tests.CreateSecret(secretName, testsuite.GetTestNamespace(nil), secret_data) + vmi := libvmi.NewAlpine( + libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()), + libvmi.WithNetwork(v1.DefaultPodNetwork()), + libvmi.WithConfigMapDisk(configMapName, configMapName), + libvmi.WithSecretDisk(secretName, secretName), + libvmi.WithServiceAccountDisk("default"), + libvmi.WithDownwardAPIDisk(downwardAPIName), + ) // In case there are no existing labels add labels to add some data to the downwardAPI disk if vmi.ObjectMeta.Labels == nil { vmi.ObjectMeta.Labels = map[string]string{downwardTestLabelKey: downwardTestLabelVal} } - tests.AddLabelDownwardAPIVolume(vmi, downwardAPIName) // this annotation causes virt launcher to immediately fail a migration vmi.Annotations = map[string]string{v1.FuncTestBlockLauncherPrepareMigrationTargetAnnotation: ""} @@ -2038,7 +2087,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { // execute a migration By("Starting the Migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration, &metav1.CreateOptions{}) Expect(err).ToNot(HaveOccurred()) @@ -2107,7 +2156,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { Expect(vmi).Should(HaveConditionFalse(v1.VirtualMachineInstanceIsMigratable)) // execute a migration, wait for finalized state - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) By("Starting a Migration") _, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Create(migration, &metav1.CreateOptions{}) @@ -2162,7 +2211,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { vmi = tests.RunVMIAndExpectLaunch(vmi, 240) By("Starting the Migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunAndCancelMigration(migration, vmi, with_virtctl, 180) @@ -2190,7 +2239,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { // execute a migration, wait for finalized state By("Starting the Migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunAndImmediatelyCancelMigration(migration, vmi, with_virtctl, 60) @@ -2248,7 +2297,7 @@ var _ = SIGMigrationDescribe("VM Live 
Migration", func() { // execute a migration, wait for finalized state By("Starting the Migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) By("Starting a Migration") const timeout = 180 @@ -2314,7 +2363,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { vmi = tests.RunVMIAndExpectLaunch(vmi, 240) By("Trying to migrate VM and expect for the migration to get stuck") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunMigration(virtClient, migration) expectMigrationSchedulingPhase := func() v1.VirtualMachineInstanceMigrationPhase { migration, err = virtClient.VirtualMachineInstanceMigration(migration.Namespace).Get(migration.Name, &metav1.GetOptions{}) @@ -2455,7 +2504,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { requiredFeatures := getNodeHostRequiredFeatures(originalNode) By("Starting the migration and expecting it to end successfully") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) _ = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) By("Ensuring that target pod has correct nodeSelector label") @@ -2518,7 +2567,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { }, 10*time.Second, 1*time.Second).Should(BeTrue(), "Node should have fake host model") By("Starting the migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) _ = libmigration.RunMigration(virtClient, migration) events.ExpectEvent(vmi, k8sv1.EventTypeWarning, watch.NoSuitableNodesForHostModelMigration) @@ -2581,7 +2630,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { } By("Starting the migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) _ = libmigration.RunMigration(virtClient, migration) events.ExpectEvent(vmi, k8sv1.EventTypeWarning, watch.NoSuitableNodesForHostModelMigration) @@ -2619,7 +2668,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { vmi = tests.RunVMIAndExpectLaunch(vmi, 240) By("Starting the Migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) _ = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, 180) }, Entry("a VMI annotation", setMigrationParallelismWithAnnotation), @@ -2663,7 +2712,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { vmi = tests.RunVMIAndExpectLaunch(vmi, 240) By("Starting the Migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, 180) // check VMI, confirm migration state @@ -2709,7 +2758,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() { Expect(domSpec.Devices.Ballooning.FreePageReporting).To(BeEquivalentTo("on")) By("starting the migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) vmi = libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration) @@ -2736,7 +2785,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", 
@@ -2736,7 +2785,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -2763,7 +2812,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             libmigration.RunMigrationAndCollectMigrationMetrics(vmi, migration)
         })
     })
@@ -2814,7 +2863,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             libwait.WaitForSuccessfulVMIStart(hugepagesVmi)

             By("starting the migration")
-            migration := tests.NewRandomMigration(hugepagesVmi.Name, hugepagesVmi.Namespace)
+            migration := libmigration.New(hugepagesVmi.Name, hugepagesVmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             // check VMI, confirm migration state
@@ -2845,7 +2894,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             libwait.WaitForSuccessfulVMIStart(cpuVMI)

             By("Performing a migration")
-            migration := tests.NewRandomMigration(cpuVMI.Name, cpuVMI.Namespace)
+            migration := libmigration.New(cpuVMI.Name, cpuVMI.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
         })

         Context("and NUMA passthrough", decorators.RequiresTwoWorkerNodesWithCPUManager, func() {
@@ -2870,7 +2919,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             libwait.WaitForSuccessfulVMIStart(cpuVMI)

             By("Performing a migration")
-            migration := tests.NewRandomMigration(cpuVMI.Name, cpuVMI.Namespace)
+            migration := libmigration.New(cpuVMI.Name, cpuVMI.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
         })
     })
@@ -2925,7 +2974,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration to target node (with the amazing feature)")
-            migration := tests.NewRandomMigration(vmiToMigrate.Name, vmiToMigrate.Namespace)
+            migration := libmigration.New(vmiToMigrate.Name, vmiToMigrate.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             vmiToMigrate, err = virtClient.VirtualMachineInstance(vmiToMigrate.Namespace).Get(context.Background(), vmiToMigrate.GetName(), &metav1.GetOptions{})
@@ -2986,7 +3035,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             // execute a migration, wait for finalized state
             By("Starting the Migration to target node (with the amazing feature)")
-            migration := tests.NewRandomMigration(vmiToMigrate.Name, vmiToMigrate.Namespace)
+            migration := libmigration.New(vmiToMigrate.Name, vmiToMigrate.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             vmiToMigrate, err = virtClient.VirtualMachineInstance(vmiToMigrate.Namespace).Get(context.Background(), vmiToMigrate.GetName(), &metav1.GetOptions{})
@@ -3173,7 +3222,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             By("migrating the VMI from first node to second node")
             libnode.AddLabelToNode(nodes[1].Name, testLabel1, "true")
             cpuSetSource := getVirtLauncherCPUSet(vmi)
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
             libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration)
@@ -3223,7 +3272,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             vmi = tests.RunVMIAndExpectLaunch(vmi, 240)

             By("Starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             By("Checking if the migration happened, and over the right network")
@@ -3238,7 +3287,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             vmi = tests.RunVMIAndExpectLaunch(vmi, 240)

             By("Starting a Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, 180)

             libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration)
@@ -3260,7 +3309,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {

             By("Checking that there always is at most one migration running")
             Consistently(func() int {
-                vmim := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+                vmim := libmigration.New(vmi.Name, vmi.Namespace)
                 // not checking err as the migration creation will be blocked immediately by virt-api's validating webhook
                 // if another one is currently running
                 vmim, err = virtClient.VirtualMachineInstanceMigration(vmi.Namespace).Create(vmim, &metav1.CreateOptions{})
@@ -3347,12 +3396,12 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             vmi = tests.RunVMIAndExpectLaunch(vmi, 240)

             By("Waiting for the migration to fail")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             setEvacuationAnnotation(migration)
             _ = libmigration.RunMigrationAndExpectFailure(migration, libmigration.MigrationWaitTime)

             By("Try again")
-            migration = tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration = libmigration.New(vmi.Name, vmi.Namespace)
             setEvacuationAnnotation(migration)
             _ = libmigration.RunMigrationAndExpectFailure(migration, libmigration.MigrationWaitTime)
@@ -3364,7 +3413,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             vmi = tests.RunVMIAndExpectLaunch(vmi, 240)

             By("Waiting for the migration to fail")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             setEvacuationAnnotation(migration)
             _ = libmigration.RunMigrationAndExpectFailure(migration, libmigration.MigrationWaitTime)
@@ -3374,7 +3423,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             Expect(err).ToNot(HaveOccurred())

             By("Try again with backoff")
-            migration = tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration = libmigration.New(vmi.Name, vmi.Namespace)
             setEvacuationAnnotation(migration)
             _ = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
@@ -3382,7 +3431,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             events.DeleteEvents(vmi, k8sv1.EventTypeWarning, watch.MigrationBackoffReason)

             By("There should be no backoff now")
-            migration = tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration = libmigration.New(vmi.Name, vmi.Namespace)
             setEvacuationAnnotation(migration)
             _ = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
@@ -3420,7 +3469,7 @@ var _ = SIGMigrationDescribe("VM Live Migration", func() {
             _ = tests.RunVMIAndExpectLaunch(vmi, 240)

             By("Trying to migrate the VirtualMachineInstance")
-            migration := tests.NewRandomMigration(vmi.Name, testsuite.GetTestNamespace(vmi))
+            migration := libmigration.New(vmi.Name, testsuite.GetTestNamespace(vmi))
             migration = libmigration.RunMigration(virtClient, migration)
             Eventually(func() *v1.VirtualMachineInstanceMigration {
                 migration, err := virtClient.VirtualMachineInstanceMigration(migration.Namespace).Get(migration.Name, &metav1.GetOptions{})
diff --git a/tests/monitoring/monitoring.go b/tests/monitoring/monitoring.go
index c803e04f3a6a..246a84abd674 100644
--- a/tests/monitoring/monitoring.go
+++ b/tests/monitoring/monitoring.go
@@ -95,7 +95,7 @@ var _ = Describe("[Serial][sig-monitoring]Monitoring", Serial, decorators.SigMon

             By("Migrating the VMI 13 times")
             for i := 0; i < 13; i++ {
-                migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+                migration := libmigration.New(vmi.Name, vmi.Namespace)
                 migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

                 // check VMI, confirm migration state
diff --git a/tests/monitoring/vm_monitoring.go b/tests/monitoring/vm_monitoring.go
index 956f7295f1c6..032e928c2275 100644
--- a/tests/monitoring/vm_monitoring.go
+++ b/tests/monitoring/vm_monitoring.go
@@ -158,7 +158,7 @@ var _ = Describe("[Serial][sig-monitoring]VM Monitoring", Serial, decorators.Sig
             vmi = tests.RunVMIAndExpectLaunch(vmi, 240)

             By("Migrating VMIs")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             waitForMetricValue(virtClient, "kubevirt_vmi_migrations_in_pending_phase", 0)
@@ -188,7 +188,7 @@ var _ = Describe("[Serial][sig-monitoring]VM Monitoring", Serial, decorators.Sig
             }

             By("Starting the Migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             migration.Annotations = map[string]string{v1.MigrationUnschedulablePodTimeoutSecondsAnnotation: "60"}
             migration = libmigration.RunMigration(virtClient, migration)
diff --git a/tests/network/hotplug.go b/tests/network/hotplug.go
index ae92537042d4..8467b3e68d7e 100644
--- a/tests/network/hotplug.go
+++ b/tests/network/hotplug.go
@@ -39,7 +39,6 @@ import (

     v1 "kubevirt.io/api/core/v1"

-    "kubevirt.io/kubevirt/tests"
     "kubevirt.io/kubevirt/tests/framework/kubevirt"
     "kubevirt.io/kubevirt/tests/libvmi"
 )
@@ -162,7 +161,7 @@ func newVMWithOneInterface() *v1.VirtualMachine {

 func migrate(vmi *v1.VirtualMachineInstance) {
     By("migrating the VMI")
-    migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+    migration := libmigration.New(vmi.Name, vmi.Namespace)
     migrationUID := libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(kubevirt.Client(), migration)
     libmigration.ConfirmVMIPostMigration(kubevirt.Client(), vmi, migrationUID)
 }
diff --git a/tests/network/macvtap.go b/tests/network/macvtap.go
index de6d20899794..af3450784375 100644
--- a/tests/network/macvtap.go
+++ b/tests/network/macvtap.go
@@ -184,7 +184,7 @@ var _ = SIGDescribe("Macvtap", decorators.Macvtap, func() {

             It("should be successful when the VMI MAC address is defined in its spec", func() {
                 By("starting the migration")
-                migration := tests.NewRandomMigration(clientVMI.Name, clientVMI.Namespace)
+                migration := libmigration.New(clientVMI.Name, clientVMI.Namespace)
                 migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

                 // check VMI, confirm migration state
@@ -258,7 +258,7 @@ var _ = SIGDescribe("Macvtap", decorators.Macvtap, func() {

             It("should keep connectivity after a migration", func() {
                 const containerCompletionWaitTime = 60
-                migration := tests.NewRandomMigration(serverVMI.Name, serverVMI.GetNamespace())
+                migration := libmigration.New(serverVMI.Name, serverVMI.GetNamespace())
                 _ = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
                 // In case of clientVMI and serverVMI running on the same node before migration, the serverVMI
                 // will be reachable only when the original launcher pod terminates.
diff --git a/tests/network/sriov.go b/tests/network/sriov.go
index e5b8301e22b5..caa0245f6ccb 100644
--- a/tests/network/sriov.go
+++ b/tests/network/sriov.go
@@ -359,7 +359,7 @@ var _ = Describe("[Serial]SRIOV", Serial, decorators.SRIOV, func() {

             It("should be successful with a running VMI on the target", func() {
                 By("starting the migration")
-                migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+                migration := libmigration.New(vmi.Name, vmi.Namespace)
                 migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
                 libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration)
diff --git a/tests/network/vmi_istio.go b/tests/network/vmi_istio.go
index 569cd372c835..ccb59804612a 100644
--- a/tests/network/vmi_istio.go
+++ b/tests/network/vmi_istio.go
@@ -188,7 +188,7 @@ var istioTests = func(vmType VmType) {
         })
         JustBeforeEach(func() {
             sourcePodName = tests.GetVmPodName(virtClient, vmi)
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
         })
         It("All containers should complete in source virt-launcher pod after migration", func() {
diff --git a/tests/network/vmi_networking.go b/tests/network/vmi_networking.go
index 98de72fd3c29..ebbe46e7c6d4 100644
--- a/tests/network/vmi_networking.go
+++ b/tests/network/vmi_networking.go
@@ -883,7 +883,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c
             Expect(ping(podIP)).To(Succeed())

             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
@@ -923,7 +923,7 @@ var _ = SIGDescribe("[rfe_id:694][crit:medium][vendor:cnv-qe@redhat.com][level:c
             Expect(ping(podIP)).To(Succeed())

             By("starting the migration")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
diff --git a/tests/operator/BUILD.bazel b/tests/operator/BUILD.bazel
index 6166df8a6c83..e332e93360bb 100644
--- a/tests/operator/BUILD.bazel
+++ b/tests/operator/BUILD.bazel
@@ -25,6 +25,7 @@ go_library(
        "//tests/framework/checks:go_default_library",
        "//tests/framework/kubevirt:go_default_library",
        "//tests/framework/matcher:go_default_library",
+       "//tests/libmigration:go_default_library",
        "//tests/libnet:go_default_library",
        "//tests/libnode:go_default_library",
"//tests/libstorage:go_default_library", diff --git a/tests/operator/operator.go b/tests/operator/operator.go index 7a3610b7ef6a..c38af4e5df9c 100644 --- a/tests/operator/operator.go +++ b/tests/operator/operator.go @@ -36,10 +36,6 @@ import ( "strings" "time" - "kubevirt.io/kubevirt/tests/libnode" - - "kubevirt.io/kubevirt/tests/decorators" - "github.com/Masterminds/semver" jsonpatch "github.com/evanphx/json-patch" "github.com/google/go-github/v32/github" @@ -81,12 +77,15 @@ import ( "kubevirt.io/kubevirt/tests/clientcmd" "kubevirt.io/kubevirt/tests/console" cd "kubevirt.io/kubevirt/tests/containerdisk" + "kubevirt.io/kubevirt/tests/decorators" "kubevirt.io/kubevirt/tests/flags" "kubevirt.io/kubevirt/tests/framework/checks" "kubevirt.io/kubevirt/tests/framework/kubevirt" "kubevirt.io/kubevirt/tests/framework/matcher" . "kubevirt.io/kubevirt/tests/framework/matcher" + "kubevirt.io/kubevirt/tests/libmigration" "kubevirt.io/kubevirt/tests/libnet" + "kubevirt.io/kubevirt/tests/libnode" "kubevirt.io/kubevirt/tests/libstorage" "kubevirt.io/kubevirt/tests/libvmi" "kubevirt.io/kubevirt/tests/libwait" @@ -719,7 +718,6 @@ var _ = Describe("[Serial][sig-operator]Operator", Serial, decorators.SigOperato generateMigratableVMIs = func(num int) []*v1.VirtualMachineInstance { vmis := []*v1.VirtualMachineInstance{} for i := 0; i < num; i++ { - vmi := tests.NewRandomVMIWithEphemeralDisk(cd.ContainerDiskFor(cd.ContainerDiskCirros)) configMapName := "configmap-" + rand.String(5) secretName := "secret-" + rand.String(5) downwardAPIName := "downwardapi-" + rand.String(5) @@ -734,19 +732,21 @@ var _ = Describe("[Serial][sig-operator]Operator", Serial, decorators.SigOperato "password": "community", } - tests.CreateConfigMap(configMapName, vmi.Namespace, config_data) - tests.CreateSecret(secretName, vmi.Namespace, secret_data) - - tests.AddUserData(vmi, "cloud-init", "#!/bin/bash\necho 'hello'\n") - tests.AddConfigMapDisk(vmi, configMapName, configMapName) - tests.AddSecretDisk(vmi, secretName, secretName) - tests.AddServiceAccountDisk(vmi, "default") + tests.CreateConfigMap(configMapName, testsuite.GetTestNamespace(nil), config_data) + tests.CreateSecret(secretName, testsuite.GetTestNamespace(nil), secret_data) + vmi := libvmi.NewCirros( + libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()), + libvmi.WithNetwork(v1.DefaultPodNetwork()), + libvmi.WithConfigMapDisk(configMapName, configMapName), + libvmi.WithSecretDisk(secretName, secretName), + libvmi.WithServiceAccountDisk("default"), + libvmi.WithDownwardAPIDisk(downwardAPIName), + libvmi.WithWatchdog(v1.WatchdogActionPoweroff), + ) // In case there are no existing labels add labels to add some data to the downwardAPI disk if vmi.ObjectMeta.Labels == nil { vmi.ObjectMeta.Labels = map[string]string{"downwardTestLabelKey": "downwardTestLabelVal"} } - tests.AddLabelDownwardAPIVolume(vmi, downwardAPIName) - tests.AddWatchdog(vmi, v1.WatchdogActionPoweroff) vmis = append(vmis, vmi) } @@ -755,7 +755,7 @@ var _ = Describe("[Serial][sig-operator]Operator", Serial, decorators.SigOperato vmi := vmis[lastVMIIndex] const nadName = "secondarynet" - Expect(libnet.CreateNAD(vmi.GetNamespace(), nadName)).To(Succeed()) + Expect(libnet.CreateNAD(testsuite.GetTestNamespace(vmi), nadName)).To(Succeed()) const networkName = "tenant-blue" vmi.Spec.Domain.Devices.Interfaces = append( @@ -803,7 +803,7 @@ var _ = Describe("[Serial][sig-operator]Operator", Serial, decorators.SigOperato deleteAllVMIs = func(vmis []*v1.VirtualMachineInstance) { for _, vmi := range vmis { - 
-            err := virtClient.VirtualMachineInstance(vmi.Namespace).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
+            err := virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Delete(context.Background(), vmi.Name, &metav1.DeleteOptions{})
             Expect(err).ToNot(HaveOccurred(), "Delete VMI successfully")
         }
     }
@@ -828,7 +828,7 @@ var _ = Describe("[Serial][sig-operator]Operator", Serial, decorators.SigOperato

         Eventually(func() error {
             for _, vmi := range vmis {
-                vmi, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
+                vmi, err := virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Get(context.Background(), vmi.Name, &metav1.GetOptions{})
                 if err != nil {
                     return err
                 }
@@ -1932,7 +1932,7 @@ spec:
             if len(migratableVMIs) > 0 {
                 By("Verifying that a once migrated VMI after an update can be migrated again")
                 vmi := migratableVMIs[0]
-                migration, err := virtClient.VirtualMachineInstanceMigration(vmi.Namespace).Create(tests.NewRandomMigration(vmi.Name, vmi.Namespace), &metav1.CreateOptions{})
+                migration, err := virtClient.VirtualMachineInstanceMigration(testsuite.GetTestNamespace(vmi)).Create(libmigration.New(vmi.Name, vmi.Namespace), &metav1.CreateOptions{})
                 Expect(err).ToNot(HaveOccurred())

                 Eventually(ThisMigration(migration), 180).Should(HaveSucceeded())
             }
diff --git a/tests/security_features_test.go b/tests/security_features_test.go
index 546df07c69a0..a895750272c5 100644
--- a/tests/security_features_test.go
+++ b/tests/security_features_test.go
@@ -338,7 +338,7 @@ var _ = Describe("[Serial][sig-compute]SecurityFeatures", Serial, decorators.Sig
             Expect(stdout).To(ContainSubstring(seContext))

             By("Migrating the VMI")
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)

             By("Ensuring the VMI SELinux context status didn't change")
diff --git a/tests/storage/datavolume.go b/tests/storage/datavolume.go
index 7cfdd80ccfe5..5a5dd9e818f8 100644
--- a/tests/storage/datavolume.go
+++ b/tests/storage/datavolume.go
@@ -323,7 +323,12 @@ var _ = SIGDescribe("DataVolume Integration", func() {
                 libdv.WithPVC(libdv.PVCWithStorageClass(sc)),
             )

-            vmi := tests.NewRandomVMIWithPVC(dataVolume.Name)
+            vmi := libvmi.New(
+                libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
+                libvmi.WithNetwork(v1.DefaultPodNetwork()),
+                libvmi.WithPersistentVolumeClaim("disk0", dataVolume.Name),
+                libvmi.WithResourceMemory("128Mi"),
+            )

             dataVolume, err = virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(nil)).Create(context.Background(), dataVolume, metav1.CreateOptions{})
             Expect(err).ToNot(HaveOccurred())
diff --git a/tests/storage/events.go b/tests/storage/events.go
index d470ff63ddd4..78b9e4039007 100644
--- a/tests/storage/events.go
+++ b/tests/storage/events.go
@@ -23,20 +23,20 @@ import (
     "context"
     "time"

-    "kubevirt.io/kubevirt/tests/framework/kubevirt"
-
     . "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" k8sv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "kubevirt.io/kubevirt/tests/events" - "kubevirt.io/kubevirt/tests/testsuite" - + v1 "kubevirt.io/api/core/v1" "kubevirt.io/client-go/kubecli" "kubevirt.io/kubevirt/tests" + "kubevirt.io/kubevirt/tests/events" + "kubevirt.io/kubevirt/tests/framework/kubevirt" + "kubevirt.io/kubevirt/tests/libvmi" "kubevirt.io/kubevirt/tests/libwait" + "kubevirt.io/kubevirt/tests/testsuite" ) const ( @@ -71,7 +71,13 @@ var _ = SIGDescribe("[Serial]K8s IO events", Serial, func() { }) It("[test_id:6225]Should catch the IO error event", func() { By("Creating VMI with faulty disk") - vmi := tests.NewRandomVMIWithPVC(pvc.Name) + vmi := libvmi.New( + libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()), + libvmi.WithNetwork(v1.DefaultPodNetwork()), + libvmi.WithPersistentVolumeClaim("disk0", pvc.Name), + libvmi.WithResourceMemory("128Mi"), + ) + Eventually(func() error { var err error vmi, err = virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Create(context.Background(), vmi) diff --git a/tests/storage/hotplug.go b/tests/storage/hotplug.go index 5277bdb0eab5..289afae9c739 100644 --- a/tests/storage/hotplug.go +++ b/tests/storage/hotplug.go @@ -1249,7 +1249,7 @@ var _ = SIGDescribe("Hotplug", func() { } Expect(sourceAttachmentPods).To(HaveLen(1)) - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration) By("Verifying the volume is still accessible and usable") diff --git a/tests/storage/snapshot.go b/tests/storage/snapshot.go index 068e6ef0e4bd..fde4fc588070 100644 --- a/tests/storage/snapshot.go +++ b/tests/storage/snapshot.go @@ -1331,13 +1331,19 @@ var _ = SIGDescribe("VirtualMachineSnapshot Tests", func() { libstorage.DeleteDataVolume(&dv) }) - DescribeTable("should accurately report DataVolume provisioning", func(vmif func(string) *v1.VirtualMachineInstance) { + DescribeTable("should accurately report DataVolume provisioning", func(storageOptFun func(string, string) libvmi.Option, memory string) { dataVolume := libdv.NewDataVolume( libdv.WithRegistryURLSourceAndPullMethod(cd.DataVolumeImportUrlForContainerDisk(cd.ContainerDiskAlpine), cdiv1.RegistryPullNode), libdv.WithPVC(libdv.PVCWithStorageClass(snapshotStorageClass)), ) - vmi := vmif(dataVolume.Name) + vmi := libvmi.New( + libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()), + libvmi.WithNetwork(v1.DefaultPodNetwork()), + libvmi.WithResourceMemory(memory), + libvmi.WithNamespace(testsuite.GetTestNamespace(nil)), + storageOptFun("disk0", dataVolume.Name), + ) vm = libvmi.NewVirtualMachine(vmi) _, err := virtClient.VirtualMachine(vm.Namespace).Create(context.Background(), vm) @@ -1360,8 +1366,8 @@ var _ = SIGDescribe("VirtualMachineSnapshot Tests", func() { vm.Status.VolumeSnapshotStatuses[0].Enabled }, 180*time.Second, 1*time.Second).Should(BeTrue()) }, - Entry("with DataVolume volume", tests.NewRandomVMIWithDataVolume), - Entry("with PVC volume", tests.NewRandomVMIWithPVC), + Entry("with DataVolume volume", libvmi.WithDataVolume, "1Gi"), + Entry("with PVC volume", libvmi.WithPersistentVolumeClaim, "128Mi"), ) It("[test_id:9705]Should show included and excluded volumes in the snapshot", func() { @@ -1386,9 +1392,14 @@ var _ = SIGDescribe("VirtualMachineSnapshot Tests", func() { dv, err = 
virtClient.CdiClient().CdiV1beta1().DataVolumes(testsuite.GetTestNamespace(nil)).Create(context.Background(), excludedDataVolume, metav1.CreateOptions{}) Expect(err).ToNot(HaveOccurred()) - vmi := tests.NewRandomVMI() - vmi = tests.AddPVCDisk(vmi, "snapshotablevolume", v1.DiskBusVirtio, includedDataVolume.Name) - vmi = tests.AddPVCDisk(vmi, "notsnapshotablevolume", v1.DiskBusVirtio, excludedDataVolume.Name) + vmi := libvmi.New( + libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()), + libvmi.WithNetwork(v1.DefaultPodNetwork()), + libvmi.WithResourceMemory("128Mi"), + libvmi.WithNamespace(testsuite.GetTestNamespace(nil)), + libvmi.WithPersistentVolumeClaim("snapshotablevolume", includedDataVolume.Name), + libvmi.WithPersistentVolumeClaim("notsnapshotablevolume", excludedDataVolume.Name), + ) vm = libvmi.NewVirtualMachine(vmi) _, err := virtClient.VirtualMachine(vm.Namespace).Create(context.Background(), vm) diff --git a/tests/swap_test.go b/tests/swap_test.go index 2b66cd39737f..b38b6d3335e4 100644 --- a/tests/swap_test.go +++ b/tests/swap_test.go @@ -205,7 +205,7 @@ var _ = Describe("[Serial][sig-compute]SwapTest", Serial, decorators.SigCompute, // execute a migration, wait for finalized state By("Starting the Migration") - migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace) + migration := libmigration.New(vmi.Name, vmi.Namespace) migration = libmigration.RunMigrationAndExpectToComplete(virtClient, migration, libmigration.MigrationWaitTime*2) // check VMI, confirm migration state @@ -271,7 +271,7 @@ var _ = Describe("[Serial][sig-compute]SwapTest", Serial, decorators.SigCompute, // execute a migration, wait for finalized state By("Starting the Migration") - migration := tests.NewRandomMigration(vmiToMigrate.Name, vmiToMigrate.Namespace) + migration := libmigration.New(vmiToMigrate.Name, vmiToMigrate.Namespace) libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) By("The workloads in the node should consume more memory than the memory size eventually.") diff --git a/tests/tests_suite_test.go b/tests/tests_suite_test.go index 49f9c2cac7ce..aa0cf336028b 100644 --- a/tests/tests_suite_test.go +++ b/tests/tests_suite_test.go @@ -31,6 +31,7 @@ import ( "kubevirt.io/kubevirt/tests" "kubevirt.io/kubevirt/tests/flags" + "kubevirt.io/kubevirt/tests/libnode" "kubevirt.io/kubevirt/tests/reporter" "kubevirt.io/kubevirt/tests/testsuite" @@ -105,7 +106,7 @@ var _ = SynchronizedBeforeSuite(testsuite.SynchronizedBeforeTestSetup, testsuite var _ = SynchronizedAfterSuite(testsuite.AfterTestSuiteCleanup, testsuite.SynchronizedAfterTestSuiteCleanup) var _ = AfterEach(func() { - tests.TestCleanup() + testCleanup() }) func getMaxFailsFromEnv() int { @@ -140,3 +141,25 @@ var _ = ReportAfterSuite("TestTests", func(report Report) { var _ = JustAfterEach(func() { k8sReporter.ReportSpec(CurrentSpecReport()) }) + +func testCleanup() { + GinkgoWriter.Println("Global test cleanup started.") + testsuite.CleanNamespaces() + libnode.CleanNodes() + resetToDefaultConfig() + testsuite.EnsureKubevirtReady() + tests.SetupAlpineHostPath() + GinkgoWriter.Println("Global test cleanup ended.") +} + +// resetToDefaultConfig resets the config to the state found when the test suite started. It will wait for the config to +// be propagated to all components before it returns. It will only update the configuration and wait for it to be +// propagated if the current config in use does not match the original one. 
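Reviewer note: the snapshot DescribeTable above now receives an option constructor instead of a whole VMI factory, so both entries share one body. A condensed sketch of how the table parameter plugs in, assuming only the signatures shown in this diff (buildVMIForEntry and volName are illustrative; the real table body also sets the namespace):

package example

import (
    v1 "kubevirt.io/api/core/v1"

    "kubevirt.io/kubevirt/tests/libvmi"
)

// buildVMIForEntry mirrors the table body: the storage option is injected,
// so the DataVolume and PVC entries differ only in the constructor and
// memory value they pass.
func buildVMIForEntry(storageOptFun func(diskName, claimName string) libvmi.Option, memory, volName string) *v1.VirtualMachineInstance {
    return libvmi.New(
        libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
        libvmi.WithNetwork(v1.DefaultPodNetwork()),
        libvmi.WithResourceMemory(memory),
        storageOptFun("disk0", volName),
    )
}

Entries then read buildVMIForEntry(libvmi.WithDataVolume, "1Gi", name) and buildVMIForEntry(libvmi.WithPersistentVolumeClaim, "128Mi", name), matching the two Entry lines above.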
diff --git a/tests/tests_suite_test.go b/tests/tests_suite_test.go
index 49f9c2cac7ce..aa0cf336028b 100644
--- a/tests/tests_suite_test.go
+++ b/tests/tests_suite_test.go
@@ -31,6 +31,7 @@ import (
     "kubevirt.io/kubevirt/tests"
     "kubevirt.io/kubevirt/tests/flags"
+    "kubevirt.io/kubevirt/tests/libnode"
     "kubevirt.io/kubevirt/tests/reporter"
     "kubevirt.io/kubevirt/tests/testsuite"
 
@@ -105,7 +106,7 @@ var _ = SynchronizedBeforeSuite(testsuite.SynchronizedBeforeTestSetup, testsuite
 var _ = SynchronizedAfterSuite(testsuite.AfterTestSuiteCleanup, testsuite.SynchronizedAfterTestSuiteCleanup)
 
 var _ = AfterEach(func() {
-    tests.TestCleanup()
+    testCleanup()
 })
 
 func getMaxFailsFromEnv() int {
@@ -140,3 +141,25 @@ var _ = ReportAfterSuite("TestTests", func(report Report) {
 var _ = JustAfterEach(func() {
     k8sReporter.ReportSpec(CurrentSpecReport())
 })
+
+func testCleanup() {
+    GinkgoWriter.Println("Global test cleanup started.")
+    testsuite.CleanNamespaces()
+    libnode.CleanNodes()
+    resetToDefaultConfig()
+    testsuite.EnsureKubevirtReady()
+    tests.SetupAlpineHostPath()
+    GinkgoWriter.Println("Global test cleanup ended.")
+}
+
+// resetToDefaultConfig resets the config to the state found when the test suite started. It will wait for the config to
+// be propagated to all components before it returns. It will only update the configuration and wait for it to be
+// propagated if the current config in use does not match the original one.
+func resetToDefaultConfig() {
+    if !CurrentSpecReport().IsSerial {
+        // Tests which alter the global kubevirt config must be run serially; therefore, if we run in parallel
+        // we can just skip the restore step.
+        return
+    }
+    tests.UpdateKubeVirtConfigValueAndWait(testsuite.KubeVirtDefaultConfig)
+}
diff --git a/tests/utils.go b/tests/utils.go
index ac7d1900de95..86b86965dd78 100644
--- a/tests/utils.go
+++ b/tests/utils.go
@@ -106,16 +106,6 @@ const (
     defaultDiskSize = "1Gi"
 )
 
-func TestCleanup() {
-    GinkgoWriter.Println("Global test cleanup started.")
-    testsuite.CleanNamespaces()
-    libnode.CleanNodes()
-    resetToDefaultConfig()
-    testsuite.EnsureKubevirtReady()
-    SetupAlpineHostPath()
-    GinkgoWriter.Println("Global test cleanup ended.")
-}
-
 func SetupAlpineHostPath() {
     const osAlpineHostPath = "alpine-host-path"
     libstorage.CreateHostPathPv(osAlpineHostPath, testsuite.GetTestNamespace(nil), testsuite.HostPathAlpine)
@@ -613,22 +603,6 @@ func NewRandomVMIWithEphemeralDiskAndConfigDriveUserdataHighMemory(containerImag
     return vmi
 }
 
-func NewRandomMigration(vmiName string, namespace string) *v1.VirtualMachineInstanceMigration {
-    return &v1.VirtualMachineInstanceMigration{
-        TypeMeta: metav1.TypeMeta{
-            APIVersion: v1.GroupVersion.String(),
-            Kind:       "VirtualMachineInstanceMigration",
-        },
-        ObjectMeta: metav1.ObjectMeta{
-            GenerateName: "test-migration-",
-            Namespace:    namespace,
-        },
-        Spec: v1.VirtualMachineInstanceMigrationSpec{
-            VMIName: vmiName,
-        },
-    }
-}
-
 // NewRandomVMIWithEphemeralDisk
 //
 // Deprecated: Use libvmi directly
@@ -666,67 +640,6 @@ func AddEphemeralDisk(vmi *v1.VirtualMachineInstance, name string, bus v1.DiskBu
     return vmi
 }
 
-// AddBootOrderToDisk
-//
-// Deprecated: Use libvmi
-func AddBootOrderToDisk(vmi *v1.VirtualMachineInstance, diskName string, bootorder *uint) *v1.VirtualMachineInstance {
-    for i, d := range vmi.Spec.Domain.Devices.Disks {
-        if d.Name == diskName {
-            vmi.Spec.Domain.Devices.Disks[i].BootOrder = bootorder
-            return vmi
-        }
-    }
-    return vmi
-}
-
-// AddPVCDisk
-//
-// Deprecated: Use libvmi
-func AddPVCDisk(vmi *v1.VirtualMachineInstance, name string, bus v1.DiskBus, claimName string) *v1.VirtualMachineInstance {
-    vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
-        Name: name,
-        DiskDevice: v1.DiskDevice{
-            Disk: &v1.DiskTarget{
-                Bus: bus,
-            },
-        },
-    })
-    vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
-        Name: name,
-        VolumeSource: v1.VolumeSource{
-            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{PersistentVolumeClaimVolumeSource: k8sv1.PersistentVolumeClaimVolumeSource{
-                ClaimName: claimName,
-            }},
-        },
-    })
-
-    return vmi
-}
-
-// AddEphemeralCdrom
-//
-// Deprecated: Use libvmi
-func AddEphemeralCdrom(vmi *v1.VirtualMachineInstance, name string, bus v1.DiskBus, image string) *v1.VirtualMachineInstance {
-    vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
-        Name: name,
-        DiskDevice: v1.DiskDevice{
-            CDRom: &v1.CDRomTarget{
-                Bus: bus,
-            },
-        },
-    })
-    vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
-        Name: name,
-        VolumeSource: v1.VolumeSource{
-            ContainerDisk: &v1.ContainerDiskSource{
-                Image: image,
-            },
-        },
-    })
-
-    return vmi
-}
-
 // NewRandomFedoraVMI
 //
 // Deprecated: Use libvmi directly
@@ -857,16 +770,6 @@ func addCloudInitDiskAndVolume(vmi *v1.VirtualMachineInstance, name string, volu
     })
 }
 
-// NewRandomVMIWithPVC
-//
-// Deprecated: Use libvmi
-func NewRandomVMIWithPVC(claimName string) *v1.VirtualMachineInstance {
-    vmi := NewRandomVMI()
-
-    vmi = AddPVCDisk(vmi, "disk0", v1.DiskBusVirtio, claimName)
-    return vmi
-}
-
 func DeletePvAndPvc(name string) {
     virtCli := kubevirt.Client()
@@ -881,109 +784,6 @@ func DeletePvAndPvc(name string) {
     }
 }
 
-// AddConfigMapDisk
-//
-// Deprecated: Use libvmi
-func AddConfigMapDisk(vmi *v1.VirtualMachineInstance, configMapName string, volumeName string) {
-    vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
-        Name: volumeName,
-        VolumeSource: v1.VolumeSource{
-            ConfigMap: &v1.ConfigMapVolumeSource{
-                LocalObjectReference: k8sv1.LocalObjectReference{
-                    Name: configMapName,
-                },
-                VolumeLabel: "",
-            },
-        },
-    })
-    vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
-        Name: volumeName,
-    })
-}
-
-// AddSecretDisk
-//
-// Deprecated: Use libvmi
-func AddSecretDisk(vmi *v1.VirtualMachineInstance, secretName string, volumeName string) {
-    AddSecretDiskWithCustomLabel(vmi, secretName, volumeName, "")
-}
-
-// AddSecretDiskWithCustomLabel
-//
-// Deprecated: Use libvmi
-func AddSecretDiskWithCustomLabel(vmi *v1.VirtualMachineInstance, secretName string, volumeName string, volumeLabel string) {
-    vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
-        Name: volumeName,
-        VolumeSource: v1.VolumeSource{
-            Secret: &v1.SecretVolumeSource{
-                SecretName:  secretName,
-                VolumeLabel: volumeLabel,
-            },
-        },
-    })
-    vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
-        Name: volumeName,
-    })
-}
-
-// AddLabelDownwardAPIVolume
-//
-// Deprecated: Use libvmi
-func AddLabelDownwardAPIVolume(vmi *v1.VirtualMachineInstance, volumeName string) {
-    vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
-        Name: volumeName,
-        VolumeSource: v1.VolumeSource{
-            DownwardAPI: &v1.DownwardAPIVolumeSource{
-                Fields: []k8sv1.DownwardAPIVolumeFile{
-                    {
-                        Path: "labels",
-                        FieldRef: &k8sv1.ObjectFieldSelector{
-                            FieldPath: "metadata.labels",
-                        },
-                    },
-                },
-                VolumeLabel: "",
-            },
-        },
-    })
-
-    vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
-        Name: volumeName,
-    })
-}
-
-// AddServiceAccountDisk
-//
-// Deprecated: Use libvmi
-func AddServiceAccountDisk(vmi *v1.VirtualMachineInstance, serviceAccountName string) {
-    volumeName := serviceAccountName + "-disk"
-    vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
-        Name: volumeName,
-        VolumeSource: v1.VolumeSource{
-            ServiceAccount: &v1.ServiceAccountVolumeSource{
-                ServiceAccountName: serviceAccountName,
-            },
-        },
-    })
-    vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
-        Name: serviceAccountName + "-disk",
-    })
-}
-
-// AddWatchdog
-//
-// Deprecated: Use libvmi
-func AddWatchdog(vmi *v1.VirtualMachineInstance, action v1.WatchdogAction) {
-    vmi.Spec.Domain.Devices.Watchdog = &v1.Watchdog{
-        Name: "watchdog",
-        WatchdogDevice: v1.WatchdogDevice{
-            I6300ESB: &v1.I6300ESBWatchdog{
-                Action: action,
-            },
-        },
-    }
-}
-
 func NewRandomReplicaSetFromVMI(vmi *v1.VirtualMachineInstance, replicas int32) *v1.VirtualMachineInstanceReplicaSet {
     name := "replicaset" + rand.String(5)
     rs := &v1.VirtualMachineInstanceReplicaSet{
@@ -1553,19 +1353,6 @@ func UpdateKubeVirtConfigValueAndWait(kvConfig v1.KubeVirtConfiguration) *v1.Kub
     return kv
 }
 
-// resetToDefaultConfig resets the config to the state found when the test suite started. It will wait for the config to
-// be propagated to all components before it returns. It will only update the configuration and wait for it to be
-// propagated if the current config in use does not match the original one.
-func resetToDefaultConfig() {
-    if !CurrentSpecReport().IsSerial {
-        // Tests which alter the global kubevirt config must be run serial, therefor, if we run in parallel
-        // we can just skip the restore step.
-        return
-    }
-
-    UpdateKubeVirtConfigValueAndWait(testsuite.KubeVirtDefaultConfig)
-}
-
 type compare func(string, string) bool
 
 func ExpectResourceVersionToBeLessEqualThanConfigVersion(resourceVersion, configVersion string) bool {
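Reviewer note: each deprecated tests.Add* helper deleted from utils.go above maps to a libvmi.Option already exercised by the operator.go hunk earlier in this patch. A condensed sketch of the mapping, using only options visible in this diff (the helper name migratableCirrosVMI is illustrative):

package example

import (
    v1 "kubevirt.io/api/core/v1"

    "kubevirt.io/kubevirt/tests/libvmi"
)

// migratableCirrosVMI shows one functional option per removed helper.
func migratableCirrosVMI(configMapName, secretName, downwardAPIName string) *v1.VirtualMachineInstance {
    return libvmi.NewCirros(
        libvmi.WithConfigMapDisk(configMapName, configMapName), // was tests.AddConfigMapDisk
        libvmi.WithSecretDisk(secretName, secretName),          // was tests.AddSecretDisk
        libvmi.WithServiceAccountDisk("default"),               // was tests.AddServiceAccountDisk
        libvmi.WithDownwardAPIDisk(downwardAPIName),            // was tests.AddLabelDownwardAPIVolume
        libvmi.WithWatchdog(v1.WatchdogActionPoweroff),         // was tests.AddWatchdog
    )
}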
diff --git a/tests/vm_state_test.go b/tests/vm_state_test.go
index fd9a6cb25725..a948d77c6c8e 100644
--- a/tests/vm_state_test.go
+++ b/tests/vm_state_test.go
@@ -70,7 +70,7 @@ var _ = Describe("[sig-storage]VM state", decorators.SigStorage, decorators.Requ
     migrateVMI := func(vmi *v1.VirtualMachineInstance) {
         By("Migrating the VMI")
-        migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+        migration := libmigration.New(vmi.Name, vmi.Namespace)
         libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
     }
diff --git a/tests/vmi_configuration_test.go b/tests/vmi_configuration_test.go
index 0c1f4afcc5a3..d6cfe779ac0f 100644
--- a/tests/vmi_configuration_test.go
+++ b/tests/vmi_configuration_test.go
@@ -2203,21 +2203,18 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
         })
 
         It("[test_id:5360]should set appropriate IO modes", func() {
-            vmi := tests.NewRandomVMI()
-
-            By("adding disks to a VMI")
-            // disk[0]: File, sparsed, no user-input, cache=none
-            tests.AddEphemeralDisk(vmi, "ephemeral-disk1", v1.DiskBusVirtio, cd.ContainerDiskFor(cd.ContainerDiskCirros))
+            vmi := libvmi.NewCirros(
+                libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
+                libvmi.WithNetwork(v1.DefaultPodNetwork()),
+                // disk[1]: Block, no user-input, cache=none
+                libvmi.WithPersistentVolumeClaim("block-pvc", dataVolume.Name),
+                // disk[2]: File, not-sparsed, no user-input, cache=none
+                libvmi.WithPersistentVolumeClaim("hostpath-pvc", tests.DiskAlpineHostPath),
+                // disk[3]: File, sparsed, user-input=threads, cache=none
+                libvmi.WithContainerDisk("ephemeral-disk2", cd.ContainerDiskFor(cd.ContainerDiskCirros)),
+            )
+            // disk[0]: File, sparsed, no user-input, cache=none
             vmi.Spec.Domain.Devices.Disks[0].Cache = v1.CacheNone
-
-            // disk[1]: Block, no user-input, cache=none
-            tests.AddPVCDisk(vmi, "block-pvc", v1.DiskBusVirtio, dataVolume.Name)
-
-            // disk[2]: File, not-sparsed, no user-input, cache=none
-            tests.AddPVCDisk(vmi, "hostpath-pvc", v1.DiskBusVirtio, tests.DiskAlpineHostPath)
-
-            // disk[3]: File, sparsed, user-input=threads, cache=none
-            tests.AddEphemeralDisk(vmi, "ephemeral-disk2", v1.DiskBusVirtio, cd.ContainerDiskFor(cd.ContainerDiskCirros))
             vmi.Spec.Domain.Devices.Disks[3].Cache = v1.CacheNone
             vmi.Spec.Domain.Devices.Disks[3].IO = v1.IOThreads
@@ -2272,7 +2269,12 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
             libstorage.EventuallyDV(dataVolume, 240, Or(HaveSucceeded(), BeInPhase(cdiv1.WaitForFirstConsumer)))
 
-            vmi := tests.NewRandomVMIWithPVC(dataVolume.Name)
+            vmi := libvmi.New(
+                libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
+                libvmi.WithNetwork(v1.DefaultPodNetwork()),
+                libvmi.WithPersistentVolumeClaim("disk0", dataVolume.Name),
+                libvmi.WithResourceMemory("128Mi"),
+            )
 
             By("setting the disk to use custom block sizes")
             logicalSize := uint(16384)
@@ -2310,7 +2312,12 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
             libstorage.EventuallyDV(dataVolume, 240, Or(HaveSucceeded(), BeInPhase(cdiv1.WaitForFirstConsumer)))
 
-            vmi := tests.NewRandomVMIWithPVC(dataVolume.Name)
+            vmi := libvmi.New(
+                libvmi.WithInterface(libvmi.InterfaceDeviceWithMasqueradeBinding()),
+                libvmi.WithNetwork(v1.DefaultPodNetwork()),
+                libvmi.WithPersistentVolumeClaim("disk0", dataVolume.Name),
+                libvmi.WithResourceMemory("128Mi"),
+            )
 
             By("setting the disk to match the volume block sizes")
             vmi.Spec.Domain.Devices.Disks[0].BlockSize = &v1.BlockSize{
@@ -3054,15 +3061,12 @@ var _ = Describe("[sig-compute]Configurations", decorators.SigCompute, func() {
         })
 
         Context("With ephemeral CD-ROM", func() {
-            var vmi *v1.VirtualMachineInstance
             var DiskBusIDE v1.DiskBus = "ide"
 
-            BeforeEach(func() {
-                vmi = libvmi.NewFedora()
-            })
-
             DescribeTable("For various bus types", func(bus v1.DiskBus, errMsg string) {
-                tests.AddEphemeralCdrom(vmi, "cdrom-0", bus, cd.ContainerDiskFor(cd.ContainerDiskCirros))
+                vmi := libvmi.NewFedora(
+                    libvmi.WithEphemeralCDRom("cdrom-0", bus, cd.ContainerDiskFor(cd.ContainerDiskCirros)),
+                )
 
                 By(fmt.Sprintf("Starting a VMI with a %s CD-ROM", bus))
                 _, err := virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Create(context.Background(), vmi)
diff --git a/tests/vmi_hook_sidecar_test.go b/tests/vmi_hook_sidecar_test.go
index daf859a9f2ed..b7ebe6ceda7f 100644
--- a/tests/vmi_hook_sidecar_test.go
+++ b/tests/vmi_hook_sidecar_test.go
@@ -262,7 +262,7 @@ var _ = Describe("[sig-compute]HookSidecars", decorators.SigCompute, func() {
             sourcePodName := sourcePod.GetObjectMeta().GetName()
             sourcePodUID := sourcePod.GetObjectMeta().GetUID()
 
-            migration := tests.NewRandomMigration(vmi.Name, testsuite.GetTestNamespace(vmi))
+            migration := libmigration.New(vmi.Name, testsuite.GetTestNamespace(vmi))
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
 
             targetPod, exists, err := getVMIPod(vmi)
diff --git a/tests/vmi_lifecycle_test.go b/tests/vmi_lifecycle_test.go
index 70c7b586a5f2..79f303a5f154 100644
--- a/tests/vmi_lifecycle_test.go
+++ b/tests/vmi_lifecycle_test.go
@@ -349,8 +349,8 @@ var _ = Describe("[rfe_id:273][crit:high][arm64][vendor:cnv-qe@redhat.com][level
             tests.AddEphemeralDisk(vmi, "disk2", v1.DiskBusVirtio, cd.ContainerDiskFor(cd.ContainerDiskCirros))
 
             By("setting boot order")
-            vmi = tests.AddBootOrderToDisk(vmi, "disk0", &alpineBootOrder)
-            vmi = tests.AddBootOrderToDisk(vmi, "disk2", &cirrosBootOrder)
+            vmi = addBootOrderToDisk(vmi, "disk0", &alpineBootOrder)
+            vmi = addBootOrderToDisk(vmi, "disk2", &cirrosBootOrder)
 
             By("starting VMI")
             vmi = tests.RunVMIAndExpectLaunch(vmi, 60)
@@ -1832,3 +1832,13 @@ func nowAsJSONWithOffset(offset time.Duration) string {
     Expect(err).ToNot(HaveOccurred(), "Should marshal to json")
     return strings.Trim(string(data), `"`)
 }
+
+func addBootOrderToDisk(vmi *v1.VirtualMachineInstance, diskName string, bootorder *uint) *v1.VirtualMachineInstance {
+    for i, d := range vmi.Spec.Domain.Devices.Disks {
+        if d.Name == diskName {
+            vmi.Spec.Domain.Devices.Disks[i].BootOrder = bootorder
+            return vmi
+        }
+    }
+    return vmi
+}
diff --git a/tests/vmi_tpm_test.go b/tests/vmi_tpm_test.go
index 6588d6e6ea90..2fa2fbf808ef 100644
--- a/tests/vmi_tpm_test.go
+++ b/tests/vmi_tpm_test.go
@@ -76,7 +76,7 @@ var _ = Describe("[sig-compute]vTPM", decorators.SigCompute, decorators.Requires
 
             By("Migrating the VMI")
             checks.SkipIfMigrationIsNotPossible()
-            migration := tests.NewRandomMigration(vmi.Name, vmi.Namespace)
+            migration := libmigration.New(vmi.Name, vmi.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
 
             By("Ensuring the TPM is still functional and its state carried over")
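Reviewer note: in the vmi_configuration_test.go CD-ROM hunk above, each table entry now builds its own Fedora VMI instead of mutating a shared variable from a BeforeEach, which keeps the entries independent of each other. A minimal sketch of the per-entry construction, using only calls visible in this diff (fedoraWithCDRom is an illustrative name):

package example

import (
    v1 "kubevirt.io/api/core/v1"

    cd "kubevirt.io/kubevirt/tests/containerdisk"
    "kubevirt.io/kubevirt/tests/libvmi"
)

// fedoraWithCDRom builds a fresh VMI per table entry; the bus is the
// table parameter, so entries no longer share mutable state.
func fedoraWithCDRom(bus v1.DiskBus) *v1.VirtualMachineInstance {
    return libvmi.NewFedora(
        libvmi.WithEphemeralCDRom("cdrom-0", bus, cd.ContainerDiskFor(cd.ContainerDiskCirros)),
    )
}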
diff --git a/tests/vmi_vsock_test.go b/tests/vmi_vsock_test.go
index da439b1089b4..3d10503f46e1 100644
--- a/tests/vmi_vsock_test.go
+++ b/tests/vmi_vsock_test.go
@@ -153,7 +153,7 @@ var _ = Describe("[sig-compute]VSOCK", Serial, decorators.SigCompute, func() {
             By("Migrating the 2nd VMI")
             checks.SkipIfMigrationIsNotPossible()
-            migration := tests.NewRandomMigration(vmi2.Name, vmi2.Namespace)
+            migration := libmigration.New(vmi2.Name, vmi2.Namespace)
             libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
 
             domain2, err = tests.GetRunningVirtualMachineInstanceDomainXML(virtClient, vmi2)
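Reviewer note: with tests.NewRandomMigration gone, every migration call site in this patch reduces to the same three-step flow. A minimal sketch under the libmigration signatures used above (migrateAndVerify is an illustrative wrapper, not part of the change):

package example

import (
    v1 "kubevirt.io/api/core/v1"
    "kubevirt.io/client-go/kubecli"

    "kubevirt.io/kubevirt/tests/libmigration"
)

// migrateAndVerify: build the migration object, run it to completion with
// the default timeout, then confirm the VMI's post-migration state.
func migrateAndVerify(virtClient kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance) {
    migration := libmigration.New(vmi.Name, vmi.Namespace)
    migration = libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
    libmigration.ConfirmVMIPostMigration(virtClient, vmi, migration)
}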