From f2cfbf44b1fb482671aedbfff820ae2af256a389 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 20 Jun 2023 10:27:14 +0200 Subject: [PATCH] e2e: use framework labels This changes the test registration so that tags for which the framework has a dedicated API (features, feature gates, slow, serial, etc.) now use those APIs. Arbitrary, custom tags are left in place for now. --- test/conformance/testdata/conformance.yaml | 2 +- test/e2e/README.md | 2 +- test/e2e/apimachinery/apiserver_identity.go | 6 +- test/e2e/apimachinery/chunking.go | 2 +- test/e2e/apimachinery/etcd_failure.go | 2 +- test/e2e/apimachinery/namespace.go | 5 +- test/e2e/apimachinery/resource_quota.go | 5 +- test/e2e/apimachinery/storage_version.go | 3 +- test/e2e/apimachinery/watchlist.go | 3 +- test/e2e/apps/controller_revision.go | 2 +- test/e2e/apps/cronjob.go | 4 +- test/e2e/apps/daemon_restart.go | 2 +- test/e2e/apps/daemon_set.go | 2 +- test/e2e/apps/disruption.go | 8 +- test/e2e/apps/job.go | 2 +- test/e2e/apps/replica_set.go | 2 +- test/e2e/apps/statefulset.go | 9 +- test/e2e/auth/node_authn.go | 3 +- test/e2e/auth/node_authz.go | 3 +- test/e2e/auth/service_accounts.go | 5 +- test/e2e/autoscaling/autoscaling_timer.go | 3 +- .../cluster_autoscaler_scalability.go | 15 +-- .../autoscaling/cluster_size_autoscaling.go | 65 ++++++------ .../custom_metrics_stackdriver_autoscaling.go | 3 +- test/e2e/autoscaling/dns_autoscaling.go | 2 +- .../autoscaling/horizontal_pod_autoscaling.go | 25 ++--- .../horizontal_pod_autoscaling_behavior.go | 3 +- test/e2e/cloud/gcp/apps/stateful_apps.go | 3 +- ..._account_admission_controller_migration.go | 3 +- test/e2e/cloud/gcp/cluster_upgrade.go | 11 ++- test/e2e/cloud/gcp/gke_node_pools.go | 5 +- test/e2e/cloud/gcp/ha_master.go | 9 +- test/e2e/cloud/gcp/kubelet_security.go | 3 +- .../cloud/gcp/network/kube_proxy_migration.go | 7 +- test/e2e/cloud/gcp/node/gpu.go | 9 +- test/e2e/cloud/gcp/node_lease.go | 2 +- test/e2e/cloud/gcp/reboot.go | 3 +- test/e2e/cloud/gcp/recreate_node.go | 3 +- test/e2e/cloud/gcp/resize_nodes.go | 4 +- test/e2e/cloud/gcp/restart.go | 2 +- test/e2e/cloud/nodes.go | 3 +- test/e2e/common/network/networking.go | 13 +-- test/e2e/common/node/configmap.go | 4 +- test/e2e/common/node/container_probe.go | 40 ++++---- test/e2e/common/node/containers.go | 8 +- test/e2e/common/node/downwardapi.go | 13 +-- test/e2e/common/node/ephemeral_containers.go | 2 +- test/e2e/common/node/expansion.go | 14 +-- .../common/node/image_credential_provider.go | 3 +- test/e2e/common/node/init_container.go | 2 +- test/e2e/common/node/kubelet.go | 10 +- test/e2e/common/node/kubelet_etc_hosts.go | 2 +- test/e2e/common/node/lifecycle_hook.go | 17 ++-- test/e2e/common/node/pod_admission.go | 2 +- test/e2e/common/node/pods.go | 20 ++-- test/e2e/common/node/privileged.go | 2 +- test/e2e/common/node/runtime.go | 22 ++--- test/e2e/common/node/runtimeclass.go | 13 +-- test/e2e/common/node/secrets.go | 4 +- test/e2e/common/node/security_context.go | 28 +++--- test/e2e/common/node/sysctl.go | 7 +- test/e2e/common/storage/configmap_volume.go | 31 +++--- test/e2e/common/storage/downwardapi.go | 3 +- test/e2e/common/storage/downwardapi_volume.go | 27 ++--- test/e2e/common/storage/empty_dir.go | 32 +++--- test/e2e/common/storage/host_path.go | 6 +- test/e2e/common/storage/projected_combined.go | 2 +- .../e2e/common/storage/projected_configmap.go | 29 +++--- .../common/storage/projected_downwardapi.go | 27 ++--- test/e2e/common/storage/projected_secret.go | 20 ++-- 
test/e2e/common/storage/secrets_volume.go | 20 ++-- test/e2e/dra/dra.go | 5 +- test/e2e/feature/feature.go | 1 + .../instrumentation/logging/generic_soak.go | 2 +- .../instrumentation/monitoring/accelerator.go | 3 +- .../monitoring/custom_metrics_stackdriver.go | 7 +- .../instrumentation/monitoring/stackdriver.go | 3 +- .../monitoring/stackdriver_metadata_agent.go | 3 +- test/e2e/kubectl/kubectl.go | 8 +- .../lifecycle/bootstrap/bootstrap_signer.go | 5 +- .../bootstrap/bootstrap_token_cleaner.go | 3 +- test/e2e/network/dns_configmap.go | 6 +- test/e2e/network/dns_scale_records.go | 3 +- test/e2e/network/dual_stack.go | 7 +- test/e2e/network/example_cluster_dns.go | 3 +- test/e2e/network/firewall.go | 2 +- test/e2e/network/ingress.go | 5 +- test/e2e/network/ingress_scale.go | 3 +- test/e2e/network/ingressclass.go | 11 ++- test/e2e/network/loadbalancer.go | 26 ++--- test/e2e/network/netpol/network_policy.go | 99 ++++++++++--------- test/e2e/network/network_tiers.go | 4 +- test/e2e/network/networking.go | 21 ++-- test/e2e/network/networking_perf.go | 3 +- test/e2e/network/no_snat.go | 3 +- test/e2e/network/service.go | 6 +- test/e2e/network/service_cidrs.go | 3 +- test/e2e/network/topology_hints.go | 3 +- test/e2e/node/examples.go | 3 +- test/e2e/node/kubelet.go | 3 +- test/e2e/node/kubelet_perf.go | 7 +- test/e2e/node/pod_gc.go | 4 +- test/e2e/node/pod_resize.go | 5 +- test/e2e/node/runtimeclass.go | 2 +- test/e2e/node/taints.go | 8 +- test/e2e/nodefeature/nodefeature.go | 2 +- test/e2e/scheduling/nvidia-gpus.go | 5 +- test/e2e/scheduling/predicates.go | 5 +- test/e2e/scheduling/preemption.go | 2 +- test/e2e/scheduling/priorities.go | 2 +- test/e2e/scheduling/ubernetes_lite.go | 4 +- .../e2e/storage/csi_mock/csi_attach_volume.go | 2 +- .../csi_mock/csi_node_stage_error_cases.go | 4 +- .../e2e/storage/csi_mock/csi_selinux_mount.go | 5 +- test/e2e/storage/csi_mock/csi_snapshot.go | 7 +- .../storage/csi_mock/csi_volume_expansion.go | 3 +- test/e2e/storage/csi_mock/csi_volume_limit.go | 6 +- test/e2e/storage/detach_mounted.go | 5 +- test/e2e/storage/empty_dir_wrapper.go | 4 +- test/e2e/storage/flexvolume.go | 3 +- .../flexvolume_mounted_volume_resize.go | 3 +- test/e2e/storage/flexvolume_online_resize.go | 3 +- .../generic_persistent_volume-disruptive.go | 2 +- test/e2e/storage/host_path_type.go | 10 +- test/e2e/storage/mounted_volume_resize.go | 3 +- .../nfs_persistent_volume-disruptive.go | 2 +- .../e2e/storage/non_graceful_node_shutdown.go | 3 +- test/e2e/storage/pd.go | 11 ++- test/e2e/storage/persistent_volumes-gce.go | 3 +- test/e2e/storage/persistent_volumes-local.go | 19 ++-- test/e2e/storage/persistent_volumes.go | 11 ++- test/e2e/storage/pvc_storageclass.go | 2 +- test/e2e/storage/regional_pd.go | 10 +- test/e2e/storage/static_pods.go | 5 +- test/e2e/storage/testsuites/disruptive.go | 11 ++- test/e2e/storage/testsuites/multivolume.go | 5 +- test/e2e/storage/testsuites/provisioning.go | 11 ++- .../testsuites/snapshottable_stress.go | 2 +- test/e2e/storage/testsuites/subpath.go | 20 ++-- test/e2e/storage/testsuites/volume_io.go | 2 +- test/e2e/storage/testsuites/volume_stress.go | 2 +- test/e2e/storage/testsuites/volumelimits.go | 2 +- test/e2e/storage/testsuites/volumemode.go | 6 +- test/e2e/storage/testsuites/volumeperf.go | 2 +- test/e2e/storage/volume_metrics.go | 4 +- test/e2e/storage/volume_provisioning.go | 11 ++- .../vsphere/persistent_volumes-vsphere.go | 7 +- test/e2e/storage/vsphere/pv_reclaimpolicy.go | 5 +- .../e2e/storage/vsphere/pvc_label_selector.go | 5 +- 
test/e2e/storage/vsphere/vsphere_scale.go | 3 +- .../storage/vsphere/vsphere_statefulsets.go | 3 +- test/e2e/storage/vsphere/vsphere_stress.go | 3 +- .../vsphere/vsphere_volume_cluster_ds.go | 3 +- .../vsphere/vsphere_volume_datastore.go | 3 +- .../vsphere/vsphere_volume_diskformat.go | 3 +- .../vsphere/vsphere_volume_disksize.go | 3 +- .../storage/vsphere/vsphere_volume_fstype.go | 3 +- .../vsphere/vsphere_volume_master_restart.go | 3 +- .../vsphere/vsphere_volume_node_delete.go | 3 +- .../vsphere/vsphere_volume_node_poweroff.go | 3 +- .../vsphere/vsphere_volume_ops_storm.go | 3 +- .../storage/vsphere/vsphere_volume_perf.go | 3 +- .../vsphere/vsphere_volume_placement.go | 3 +- .../vsphere/vsphere_volume_vpxd_restart.go | 3 +- .../vsphere/vsphere_volume_vsan_policy.go | 3 +- .../storage/vsphere/vsphere_zone_support.go | 3 +- test/e2e/windows/cpu_limits.go | 3 +- test/e2e/windows/density.go | 3 +- test/e2e/windows/device_plugin.go | 3 +- test/e2e/windows/dns.go | 3 +- test/e2e/windows/gmsa_full.go | 3 +- test/e2e/windows/gmsa_kubelet.go | 3 +- test/e2e/windows/host_process.go | 3 +- test/e2e/windows/hybrid_network.go | 5 +- test/e2e/windows/hyperv.go | 3 +- test/e2e/windows/kubelet_stats.go | 5 +- test/e2e/windows/memory_limits.go | 3 +- test/e2e/windows/reboot_node.go | 3 +- test/e2e/windows/security_context.go | 3 +- test/e2e/windows/volumes.go | 3 +- test/e2e_node/apparmor_test.go | 4 +- test/e2e_node/checkpoint_container.go | 3 +- test/e2e_node/container_lifecycle_test.go | 10 +- test/e2e_node/container_log_rotation_test.go | 2 +- test/e2e_node/container_manager_test.go | 5 +- test/e2e_node/cpu_manager_metrics_test.go | 3 +- test/e2e_node/cpu_manager_test.go | 3 +- test/e2e_node/critical_pod_test.go | 5 +- test/e2e_node/deleted_pods_test.go | 2 +- test/e2e_node/density_test.go | 2 +- test/e2e_node/device_manager_test.go | 6 +- test/e2e_node/device_plugin_test.go | 8 +- test/e2e_node/dra_test.go | 7 +- test/e2e_node/eviction_test.go | 24 ++--- test/e2e_node/garbage_collector_test.go | 3 +- test/e2e_node/hugepages_test.go | 3 +- test/e2e_node/image_id_test.go | 3 +- test/e2e_node/lock_contention_linux_test.go | 2 +- test/e2e_node/log_path_test.go | 2 +- test/e2e_node/memory_manager_test.go | 3 +- test/e2e_node/mirror_pod_grace_period_test.go | 8 +- test/e2e_node/mirror_pod_test.go | 10 +- test/e2e_node/node_container_manager_test.go | 5 +- test/e2e_node/node_perf_test.go | 2 +- test/e2e_node/node_problem_detector_linux.go | 3 +- test/e2e_node/node_shutdown_linux_test.go | 5 +- test/e2e_node/oomkiller_linux_test.go | 4 +- test/e2e_node/os_label_rename_test.go | 2 +- test/e2e_node/pids_test.go | 2 +- test/e2e_node/pod_conditions_test.go | 3 +- test/e2e_node/pod_host_ips.go | 4 +- test/e2e_node/pod_hostnamefqdn_test.go | 2 +- test/e2e_node/podresources_test.go | 6 +- test/e2e_node/pods_container_manager_test.go | 4 +- .../pods_lifecycle_termination_test.go | 2 +- test/e2e_node/quota_lsci_test.go | 4 +- test/e2e_node/resource_metrics_test.go | 3 +- test/e2e_node/resource_usage_test.go | 2 +- test/e2e_node/restart_test.go | 2 +- test/e2e_node/runtime_conformance_test.go | 2 +- test/e2e_node/seccompdefault_test.go | 3 +- test/e2e_node/security_context_test.go | 15 +-- test/e2e_node/standalone_test.go | 3 +- test/e2e_node/summary_test.go | 2 +- test/e2e_node/swap_test.go | 7 +- test/e2e_node/system_node_critical_test.go | 3 +- .../e2e_node/topology_manager_metrics_test.go | 3 +- test/e2e_node/topology_manager_test.go | 3 +- test/e2e_node/unknown_pods_test.go | 2 +- 
test/e2e_node/volume_manager_test.go | 2 +- 230 files changed, 834 insertions(+), 680 deletions(-) diff --git a/test/conformance/testdata/conformance.yaml b/test/conformance/testdata/conformance.yaml index 283b513f8df56..f512bd803bb49 100755 --- a/test/conformance/testdata/conformance.yaml +++ b/test/conformance/testdata/conformance.yaml @@ -3035,7 +3035,7 @@ file: test/e2e/storage/persistent_volumes.go - testname: Projected Volume, multiple projections codename: '[sig-storage] Projected combined should project all components that make - up the projection API [Projection][NodeConformance] [Conformance]' + up the projection API [Projection] [NodeConformance] [Conformance]' description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory diff --git a/test/e2e/README.md b/test/e2e/README.md index 656c86eceec53..e3c1e356841f7 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -59,7 +59,7 @@ import ( "github.com/onsi/ginkgo" "k8s.io/kubernetes/test/e2e/lifecycle" ) -var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { +var _ = lifecycle.SIGDescribe("cluster", feature.BootstrapTokens, func() { /* ... */ ginkgo.It("should sign the new added bootstrap tokens", func(ctx context.Context) { /* ... */ diff --git a/test/e2e/apimachinery/apiserver_identity.go b/test/e2e/apimachinery/apiserver_identity.go index ef60153b9dbb0..4d6a05b0216a5 100644 --- a/test/e2e/apimachinery/apiserver_identity.go +++ b/test/e2e/apimachinery/apiserver_identity.go @@ -26,13 +26,13 @@ import ( "strings" "time" - "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "golang.org/x/crypto/cryptobyte" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -80,11 +80,11 @@ func restartAPIServer(ctx context.Context, node *v1.Node) error { } // This test requires that --feature-gates=APIServerIdentity=true be set on the apiserver -var _ = SIGDescribe("kube-apiserver identity [Feature:APIServerIdentity]", func() { +var _ = SIGDescribe("kube-apiserver identity", feature.APIServerIdentity, func() { f := framework.NewDefaultFramework("kube-apiserver-identity") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - ginkgo.It("kube-apiserver identity should persist after restart [Disruptive]", func(ctx context.Context) { + f.It("kube-apiserver identity should persist after restart", f.WithDisruptive(), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce") client := f.ClientSet diff --git a/test/e2e/apimachinery/chunking.go b/test/e2e/apimachinery/chunking.go index c9d091498c257..5c6620f9d1349 100644 --- a/test/e2e/apimachinery/chunking.go +++ b/test/e2e/apimachinery/chunking.go @@ -141,7 +141,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() { MUST return the remaining items in chunks of the size not exceeding the limit, with appropriately set RemainingItems field in the response and with the ResourceVersion returned as part of the inconsistent list. 
*/ - framework.ConformanceIt("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]", func(ctx context.Context) { + framework.ConformanceIt("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", f.WithSlow(), func(ctx context.Context) { ns := f.Namespace.Name c := f.ClientSet client := c.CoreV1().PodTemplates(ns) diff --git a/test/e2e/apimachinery/etcd_failure.go b/test/e2e/apimachinery/etcd_failure.go index bb44cb69d0304..cecef2c1d324e 100644 --- a/test/e2e/apimachinery/etcd_failure.go +++ b/test/e2e/apimachinery/etcd_failure.go @@ -36,7 +36,7 @@ import ( "github.com/onsi/ginkgo/v2" ) -var _ = SIGDescribe("Etcd failure [Disruptive]", func() { +var _ = SIGDescribe("Etcd failure", framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("etcd-failure") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/apimachinery/namespace.go b/test/e2e/apimachinery/namespace.go index ed3b45be33c9f..121f4be1cc30d 100644 --- a/test/e2e/apimachinery/namespace.go +++ b/test/e2e/apimachinery/namespace.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" @@ -231,7 +232,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(ctx context.Context, f *fram // that each have a variable amount of content in the associated Namespace. // When run in [Serial] this test appears to delete Namespace objects at a // rate of approximately 1 per second. 
-var _ = SIGDescribe("Namespaces [Serial]", func() { +var _ = SIGDescribe("Namespaces", framework.WithSerial(), func() { f := framework.NewDefaultFramework("namespaces") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline @@ -259,7 +260,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() { }) // On hold until etcd3; see #7372 - ginkgo.It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]", func(ctx context.Context) { + f.It("should always delete fast (ALL of 100 namespaces in 150 seconds)", feature.ComprehensiveNamespaceDraining, func(ctx context.Context) { extinguish(ctx, f, 100, 0, 150) }) diff --git a/test/e2e/apimachinery/resource_quota.go b/test/e2e/apimachinery/resource_quota.go index 0293117d8cc11..e9f1fcf0940cb 100644 --- a/test/e2e/apimachinery/resource_quota.go +++ b/test/e2e/apimachinery/resource_quota.go @@ -46,6 +46,7 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" "k8s.io/kubernetes/pkg/quota/v1/evaluator/core" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/utils/crd" imageutils "k8s.io/kubernetes/test/utils/image" @@ -1203,7 +1204,7 @@ var _ = SIGDescribe("ResourceQuota", func() { }) }) -var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { +var _ = SIGDescribe("ResourceQuota", feature.ScopeSelectors, func() { f := framework.NewDefaultFramework("scope-selectors") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func(ctx context.Context) { @@ -1384,7 +1385,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() { }) }) -var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() { +var _ = SIGDescribe("ResourceQuota", feature.PodPriority, func() { f := framework.NewDefaultFramework("resourcequota-priorityclass") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline diff --git a/test/e2e/apimachinery/storage_version.go b/test/e2e/apimachinery/storage_version.go index f4f4ee90b0983..b792f766a7782 100644 --- a/test/e2e/apimachinery/storage_version.go +++ b/test/e2e/apimachinery/storage_version.go @@ -24,6 +24,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" admissionapi "k8s.io/pod-security-admission/api" @@ -36,7 +37,7 @@ const ( ) // This test requires that --feature-gates=APIServerIdentity=true,StorageVersionAPI=true be set on the apiserver and the controller manager -var _ = SIGDescribe("StorageVersion resources [Feature:StorageVersionAPI]", func() { +var _ = SIGDescribe("StorageVersion resources", feature.StorageVersionAPI, func() { f := framework.NewDefaultFramework("storage-version") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/apimachinery/watchlist.go b/test/e2e/apimachinery/watchlist.go index 754dd8d95647c..44721651a89fe 100644 --- a/test/e2e/apimachinery/watchlist.go +++ b/test/e2e/apimachinery/watchlist.go @@ -32,10 +32,11 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" ) -var _ = SIGDescribe("API Streaming (aka. WatchList) [Serial] [Feature:WatchList]", func() { +var _ = SIGDescribe("API Streaming (aka. 
WatchList)", framework.WithSerial(), feature.WatchList, func() { f := framework.NewDefaultFramework("watchlist") ginkgo.It("should be requested when ENABLE_CLIENT_GO_WATCH_LIST_ALPHA is set", func(ctx context.Context) { prevWatchListEnvValue, wasWatchListEnvSet := os.LookupEnv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA") diff --git a/test/e2e/apps/controller_revision.go b/test/e2e/apps/controller_revision.go index 5630b9d57982e..f4194c92cb368 100644 --- a/test/e2e/apps/controller_revision.go +++ b/test/e2e/apps/controller_revision.go @@ -54,7 +54,7 @@ const ( // happen. In the future, running in parallel may work if we have an eviction // model which lets the DS controller kick out other pods to make room. // See https://issues.k8s.io/21767 for more details -var _ = SIGDescribe("ControllerRevision [Serial]", func() { +var _ = SIGDescribe("ControllerRevision", framework.WithSerial(), func() { var f *framework.Framework ginkgo.AfterEach(func(ctx context.Context) { diff --git a/test/e2e/apps/cronjob.go b/test/e2e/apps/cronjob.go index a363feafc4501..b83d368d3bc1b 100644 --- a/test/e2e/apps/cronjob.go +++ b/test/e2e/apps/cronjob.go @@ -94,7 +94,7 @@ var _ = SIGDescribe("CronJob", func() { Testname: CronJob Suspend Description: CronJob MUST support suspension, which suppresses creation of new jobs. */ - framework.ConformanceIt("should not schedule jobs when suspended [Slow]", func(ctx context.Context) { + framework.ConformanceIt("should not schedule jobs when suspended", f.WithSlow(), func(ctx context.Context) { ginkgo.By("Creating a suspended cronjob") cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1.AllowConcurrent, sleepCommand, nil, nil) @@ -122,7 +122,7 @@ var _ = SIGDescribe("CronJob", func() { Testname: CronJob ForbidConcurrent Description: CronJob MUST support ForbidConcurrent policy, allowing to run single, previous job at the time. */ - framework.ConformanceIt("should not schedule new jobs when ForbidConcurrent [Slow]", func(ctx context.Context) { + framework.ConformanceIt("should not schedule new jobs when ForbidConcurrent", f.WithSlow(), func(ctx context.Context) { ginkgo.By("Creating a ForbidConcurrent cronjob") cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent, sleepCommand, nil, nil) diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 9d154553103d4..19cf5bc8945dc 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -205,7 +205,7 @@ func getContainerRestarts(ctx context.Context, c clientset.Interface, ns string, return failedContainers, containerRestartNodes.List() } -var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { +var _ = SIGDescribe("DaemonRestart", framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("daemonrestart") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index fc8079d72f113..0d06247088e8c 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -116,7 +116,7 @@ func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, name // happen. In the future, running in parallel may work if we have an eviction // model which lets the DS controller kick out other pods to make room. 
// See https://issues.k8s.io/21767 for more details -var _ = SIGDescribe("Daemon set [Serial]", func() { +var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() { var f *framework.Framework ginkgo.AfterEach(func(ctx context.Context) { diff --git a/test/e2e/apps/disruption.go b/test/e2e/apps/disruption.go index a23f0d2f57478..0bdf852166798 100644 --- a/test/e2e/apps/disruption.go +++ b/test/e2e/apps/disruption.go @@ -283,11 +283,11 @@ var _ = SIGDescribe("DisruptionController", func() { // tests with exclusive set to true relies on HostPort to make sure // only one pod from the replicaset is assigned to each node. This // requires these tests to be run serially. - var serial string + args := []interface{}{fmt.Sprintf("evictions: %s => %s", c.description, expectation)} if c.exclusive { - serial = " [Serial]" + args = append(args, framework.WithSerial()) } - ginkgo.It(fmt.Sprintf("evictions: %s => %s%s", c.description, expectation, serial), func(ctx context.Context) { + f.It(append(args, func(ctx context.Context) { if c.skipForBigClusters { e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1) } @@ -338,7 +338,7 @@ var _ = SIGDescribe("DisruptionController", func() { }) framework.ExpectNoError(err) } - }) + })...) } /* diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 39677f18b2d48..0825237f5ac22 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -652,7 +652,7 @@ done`} } }) - ginkgo.It("should run a job to completion with CPU requests [Serial]", func(ctx context.Context) { + f.It("should run a job to completion with CPU requests", f.WithSerial(), func(ctx context.Context) { ginkgo.By("Creating a job that with CPU requests") testNodeName := scheduling.GetNodeThatCanRunPod(ctx, f) diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 82b78e9eb196a..04a848a545451 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -498,7 +498,7 @@ func testRSLifeCycle(ctx context.Context, f *framework.Framework) { framework.ExpectNoError(err, "Failed to create pods: %s", err) // Scale the ReplicaSet - ginkgo.By(fmt.Sprintf("Scaling up %q replicaset ", rsName)) + ginkgo.By(fmt.Sprintf("Scaling up %q replicaset", rsName)) _, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(update *appsv1.ReplicaSet) { x := int32(2) update.Spec.Replicas = &x diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index a59e4789be11a..91b795cf71e92 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -47,6 +47,7 @@ import ( "k8s.io/client-go/tools/cache" watchtools "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -637,7 +638,7 @@ var _ = SIGDescribe("StatefulSet", func() { Testname: StatefulSet, Scaling Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up, and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any Pods belonging to the StatefulSet are unhealthy. This test does not depend on a preexisting default StorageClass or a dynamic provisioner. 
*/ - framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func(ctx context.Context) { + framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", f.WithSlow(), func(ctx context.Context) { psLabels := klabels.Set(labels) w := &cache.ListWatch{ WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) { @@ -747,7 +748,7 @@ var _ = SIGDescribe("StatefulSet", func() { Testname: StatefulSet, Burst Scaling Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling. This test does not depend on a preexisting default StorageClass or a dynamic provisioner. */ - framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods [Slow]", func(ctx context.Context) { + framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", f.WithSlow(), func(ctx context.Context) { psLabels := klabels.Set(labels) ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns) @@ -1154,7 +1155,7 @@ var _ = SIGDescribe("StatefulSet", func() { }) }) - ginkgo.Describe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() { + f.Describe("Deploy clustered applications", feature.StatefulSet, framework.WithSlow(), func() { var appTester *clusterAppTester ginkgo.BeforeEach(func(ctx context.Context) { @@ -1424,7 +1425,7 @@ var _ = SIGDescribe("StatefulSet", func() { e2estatefulset.DeleteAllStatefulSets(ctx, c, ns) }) - ginkgo.It("PVC should be recreated when pod is pending due to missing PVC [Disruptive][Serial]", func(ctx context.Context) { + f.It("PVC should be recreated when pod is pending due to missing PVC", f.WithDisruptive(), f.WithSerial(), func(ctx context.Context) { e2epv.SkipIfNoDefaultStorageClass(ctx, c) readyNode, err := e2enode.GetRandomReadySchedulableNode(ctx, c) diff --git a/test/e2e/auth/node_authn.go b/test/e2e/auth/node_authn.go index 7527e49e19e6c..63beb0223dd6e 100644 --- a/test/e2e/auth/node_authn.go +++ b/test/e2e/auth/node_authn.go @@ -25,6 +25,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/cluster/ports" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -35,7 +36,7 @@ import ( "github.com/onsi/gomega" ) -var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() { +var _ = SIGDescribe(feature.NodeAuthenticator, func() { f := framework.NewDefaultFramework("node-authn") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline diff --git a/test/e2e/auth/node_authz.go b/test/e2e/auth/node_authz.go index 4152313b816ba..ed43d48a3a488 100644 --- a/test/e2e/auth/node_authz.go +++ b/test/e2e/auth/node_authz.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -41,7 +42,7 @@ const ( nodeNamePrefix = "system:node:" ) -var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() { +var _ = SIGDescribe(feature.NodeAuthorizer, func() { f := framework.NewDefaultFramework("node-authz") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 
77337a49f2ffb..654600d7e324c 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -43,6 +43,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" utilptr "k8s.io/utils/pointer" @@ -334,7 +335,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { Containers MUST verify that the projected service account token can be read and has correct file mode set including ownership and permission. */ - ginkgo.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { e2eskipper.SkipIfNodeOSDistroIs("windows") var ( @@ -430,7 +431,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { } }) - ginkgo.It("should support InClusterConfig with token rotation [Slow]", func(ctx context.Context) { + f.It("should support InClusterConfig with token rotation", f.WithSlow(), func(ctx context.Context) { tenMin := int64(10 * 60) pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"}, diff --git a/test/e2e/autoscaling/autoscaling_timer.go b/test/e2e/autoscaling/autoscaling_timer.go index 32ca926b85c4e..9acf09f751f72 100644 --- a/test/e2e/autoscaling/autoscaling_timer.go +++ b/test/e2e/autoscaling/autoscaling_timer.go @@ -23,6 +23,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -34,7 +35,7 @@ import ( "github.com/onsi/gomega/gmeasure" ) -var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() { +var _ = SIGDescribe(feature.ClusterSizeAutoscalingScaleUp, framework.WithSlow(), "Autoscaling", func() { f := framework.NewDefaultFramework("autoscaling") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var experiment *gmeasure.Experiment diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go index 058a40724f667..2691f17f2b93f 100644 --- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go +++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/strategicpatch" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2erc "k8s.io/kubernetes/test/e2e/framework/rc" @@ -61,7 +62,7 @@ type scaleUpTestConfig struct { expectedResult *clusterPredicates } -var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { +var _ = SIGDescribe("Cluster size autoscaler scalability", framework.WithSlow(), func() { f := framework.NewDefaultFramework("autoscaling") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var c clientset.Interface @@ -138,7 +139,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { klog.Infof("Made nodes schedulable again in %v", time.Since(s).String()) }) - ginkgo.It("should scale up at all 
[Feature:ClusterAutoscalerScalability1]", func(ctx context.Context) { + f.It("should scale up at all", feature.ClusterAutoscalerScalability1, func(ctx context.Context) { perNodeReservation := int(float64(memCapacityMb) * 0.95) replicasPerNode := 10 @@ -161,7 +162,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { defer testCleanup() }) - ginkgo.It("should scale up twice [Feature:ClusterAutoscalerScalability2]", func(ctx context.Context) { + f.It("should scale up twice", feature.ClusterAutoscalerScalability2, func(ctx context.Context) { perNodeReservation := int(float64(memCapacityMb) * 0.95) replicasPerNode := 10 additionalNodes1 := int(math.Ceil(0.7 * maxNodes)) @@ -210,7 +211,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { klog.Infof("Scaled up twice") }) - ginkgo.It("should scale down empty nodes [Feature:ClusterAutoscalerScalability3]", func(ctx context.Context) { + f.It("should scale down empty nodes", feature.ClusterAutoscalerScalability3, func(ctx context.Context) { perNodeReservation := int(float64(memCapacityMb) * 0.7) replicas := int(math.Ceil(maxNodes * 0.7)) totalNodes := maxNodes @@ -238,7 +239,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { }, scaleDownTimeout)) }) - ginkgo.It("should scale down underutilized nodes [Feature:ClusterAutoscalerScalability4]", func(ctx context.Context) { + f.It("should scale down underutilized nodes", feature.ClusterAutoscalerScalability4, func(ctx context.Context) { perPodReservation := int(float64(memCapacityMb) * 0.01) // underutilizedNodes are 10% full underutilizedPerNodeReplicas := 10 @@ -296,7 +297,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { }, timeout)) }) - ginkgo.It("shouldn't scale down with underutilized nodes due to host port conflicts [Feature:ClusterAutoscalerScalability5]", func(ctx context.Context) { + f.It("shouldn't scale down with underutilized nodes due to host port conflicts", feature.ClusterAutoscalerScalability5, func(ctx context.Context) { fullReservation := int(float64(memCapacityMb) * 0.9) hostPortPodReservation := int(float64(memCapacityMb) * 0.3) totalNodes := maxNodes @@ -333,7 +334,7 @@ var _ = SIGDescribe("Cluster size autoscaler scalability [Slow]", func() { gomega.Expect(nodes.Items).To(gomega.HaveLen(totalNodes)) }) - ginkgo.It("CA ignores unschedulable pods while scheduling schedulable pods [Feature:ClusterAutoscalerScalability6]", func(ctx context.Context) { + f.It("CA ignores unschedulable pods while scheduling schedulable pods", feature.ClusterAutoscalerScalability6, func(ctx context.Context) { // Start a number of pods saturating existing nodes. 
perNodeReservation := int(float64(memCapacityMb) * 0.80) replicasPerNode := 10 diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index f8a830bfd95f2..f9e17020c780b 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -43,6 +43,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2emanifest "k8s.io/kubernetes/test/e2e/framework/manifest" @@ -93,7 +94,7 @@ const ( gpuLabel = "cloud.google.com/gke-accelerator" ) -var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { +var _ = SIGDescribe("Cluster size autoscaling", framework.WithSlow(), func() { f := framework.NewDefaultFramework("autoscaling") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var c clientset.Interface @@ -166,7 +167,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { klog.Infof("Made nodes schedulable again in %v", time.Since(s).String()) }) - ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("shouldn't increase cluster size if pending pod is too large", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { ginkgo.By("Creating unschedulable pod") ReserveMemory(ctx, f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout) ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "memory-reservation") @@ -206,13 +207,13 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) } - ginkgo.It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should increase cluster size if pending pods are small", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { simpleScaleUpTest(ctx, 0) }) gpuType := os.Getenv("TESTED_GPU_TYPE") - ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { + f.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s]", gpuType), feature.ClusterSizeAutoscalingGpu, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") if gpuType == "" { framework.Failf("TEST_GPU_TYPE not defined") @@ -239,7 +240,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { gomega.Expect(getPoolNodes(ctx, f, gpuPoolName)).To(gomega.HaveLen(1)) }) - ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { + f.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s]", gpuType), feature.ClusterSizeAutoscalingGpu, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") if gpuType == "" { framework.Failf("TEST_GPU_TYPE not defined") @@ -269,7 +270,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { gomega.Expect(getPoolNodes(ctx, f, gpuPoolName)).To(gomega.HaveLen(2)) }) - ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { + f.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs 
[GpuType:%s]", gpuType), feature.ClusterSizeAutoscalingGpu, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") if gpuType == "" { framework.Failf("TEST_GPU_TYPE not defined") @@ -298,7 +299,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { gomega.Expect(getPoolNodes(ctx, f, gpuPoolName)).To(gomega.BeEmpty()) }) - ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func(ctx context.Context) { + f.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s]", gpuType), feature.ClusterSizeAutoscalingGpu, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") if gpuType == "" { framework.Failf("TEST_GPU_TYPE not defined") @@ -328,11 +329,11 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { gomega.Expect(getPoolNodes(ctx, f, gpuPoolName)).To(gomega.BeEmpty()) }) - ginkgo.It("should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should increase cluster size if pending pods are small and one node is broken", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { e2enetwork.TestUnderTemporaryNetworkFailure(ctx, c, "default", getAnyNode(ctx, c), func(ctx context.Context) { simpleScaleUpTest(ctx, 1) }) }) - ginkgo.It("shouldn't trigger additional scale-ups during processing scale-up [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("shouldn't trigger additional scale-ups during processing scale-up", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { // Wait for the situation to stabilize - CA should be running and have up-to-date node readiness info. status, err := waitForScaleUpStatus(ctx, c, func(s *scaleUpStatus) bool { return s.ready == s.target && s.ready <= nodeCount @@ -372,7 +373,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { gomega.Expect(nodes.Items).To(gomega.HaveLen(status.target + unmanagedNodes)) }) - ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") ginkgo.By("Creating new node-pool with n1-standard-4 machines") @@ -406,7 +407,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) }) - ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should disable node pool autoscaling", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") ginkgo.By("Creating new node-pool with n1-standard-4 machines") @@ -419,7 +420,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2)) }) - ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should increase cluster size if pods are pending due to host port conflict", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { scheduling.CreateHostPortPods(ctx, f, "host-port", nodeCount+2, false) 
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, "host-port") @@ -428,7 +429,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(ctx, f, c)) }) - ginkgo.It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should increase cluster size if pods are pending due to pod anti-affinity", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { pods := nodeCount newPods := 2 labels := map[string]string{ @@ -447,7 +448,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+newPods, scaleUpTimeout)) }) - ginkgo.It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should increase cluster size if pod requesting EmptyDir volume is pending", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { ginkgo.By("creating pods") pods := nodeCount newPods := 1 @@ -468,7 +469,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+newPods, scaleUpTimeout)) }) - ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should increase cluster size if pod requesting volume is pending", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") volumeLabels := labels.Set{ @@ -538,7 +539,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+newPods, scaleUpTimeout)) }) - ginkgo.It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should add node to the particular mig", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { labelKey := "cluster-autoscaling-test.special-node" labelValue := "true" @@ -638,7 +639,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, "node-selector")) }) - ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should scale up correct target pool", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") ginkgo.By("Creating new node-pool with n1-standard-4 machines") @@ -683,15 +684,15 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size < increasedSize }, scaleDownTimeout, unready)) } - ginkgo.It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]", + f.It("should correctly scale down after a node is not needed", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { simpleScaleDownTest(ctx, 0) }) - ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("should correctly scale down after a node is not needed and one node is broken", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() 
e2enetwork.TestUnderTemporaryNetworkFailure(ctx, c, "default", getAnyNode(ctx, c), func(ctx context.Context) { simpleScaleDownTest(ctx, 1) }) }) - ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("should correctly scale down after a node is not needed when there is non autoscaled pool", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gke") increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes) @@ -713,7 +714,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size < increasedSize+extraNodes }, scaleDownTimeout+10*time.Minute)) }) - ginkgo.It("should be able to scale down when rescheduling a pod is required and pdb allows for it[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("should be able to scale down when rescheduling a pod is required and pdb allows for it", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { runDrainTest(ctx, f, originalSizes, f.Namespace.Name, 1, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, @@ -721,7 +722,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) - ginkgo.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("shouldn't be able to scale down when rescheduling a pod is required, but pdb doesn't allow drain", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { runDrainTest(ctx, f, originalSizes, f.Namespace.Name, 1, 0, func(increasedSize int) { ginkgo.By("No nodes should be removed") time.Sleep(scaleDownTimeout) @@ -731,7 +732,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) - ginkgo.It("should be able to scale down by draining multiple pods one by one as dictated by pdb[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("should be able to scale down by draining multiple pods one by one as dictated by pdb", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { runDrainTest(ctx, f, originalSizes, f.Namespace.Name, 2, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, @@ -739,7 +740,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) - ginkgo.It("should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("should be able to scale down by draining system pods with pdb", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { runDrainTest(ctx, f, originalSizes, "kube-system", 2, 1, func(increasedSize int) { ginkgo.By("Some node should be removed") framework.ExpectNoError(WaitForClusterSizeFunc(ctx, f.ClientSet, @@ -747,7 +748,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { }) }) - ginkgo.It("Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("Should be able to scale a node group up from 0", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { // Provider-specific setup if framework.ProviderIs("gke") { // GKE-specific setup @@ -870,7 +871,7 @@ var _ = 
SIGDescribe("Cluster size autoscaling [Slow]", func() { gomega.Expect(newSize).To(gomega.BeEmpty()) } - ginkgo.It("Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("Should be able to scale a node group down to 0", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { if framework.ProviderIs("gke") { // In GKE, we can just add a node pool gkeScaleToZero(ctx) } else if len(originalSizes) >= 2 { @@ -880,7 +881,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { } }) - ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() clusterSize := nodeCount @@ -944,7 +945,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, len(nodes.Items), nodesRecoverTimeout)) }) - ginkgo.It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("shouldn't scale up when expendable pod is created", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { createPriorityClasses(ctx, f) // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created. ginkgo.DeferCleanup(ReserveMemoryWithPriority, f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName) @@ -955,7 +956,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size == nodeCount }, time.Second)) }) - ginkgo.It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("should scale up when non expendable pod is created", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { createPriorityClasses(ctx, f) // Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created. cleanupFunc := ReserveMemoryWithPriority(ctx, f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName) @@ -965,7 +966,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size > nodeCount }, time.Second)) }) - ginkgo.It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { + f.It("shouldn't scale up when expendable pod is preempted", feature.ClusterSizeAutoscalingScaleUp, func(ctx context.Context) { createPriorityClasses(ctx, f) // Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node. 
cleanupFunc1 := ReserveMemoryWithPriority(ctx, f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName) @@ -977,7 +978,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size == nodeCount }, time.Second)) }) - ginkgo.It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("should scale down when expendable pod is running", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { createPriorityClasses(ctx, f) increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes) // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node. @@ -988,7 +989,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { func(size int) bool { return size == nodeCount }, scaleDownTimeout)) }) - ginkgo.It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func(ctx context.Context) { + f.It("shouldn't scale down when non expendable pod is running", feature.ClusterSizeAutoscalingScaleDown, func(ctx context.Context) { createPriorityClasses(ctx, f) increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes) // Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node. diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index 4840a4fad7b75..78f4953d73a55 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -53,7 +54,7 @@ type externalMetricTarget struct { isAverage bool } -var _ = SIGDescribe("[HPA] [Feature:CustomMetricsAutoscaling] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() { +var _ = SIGDescribe("[HPA]", feature.CustomMetricsAutoscaling, "Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() { ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessProviderIs("gce", "gke") }) diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 98f9945245f2e..b92e72bbfd7cd 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -104,7 +104,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() { // This test is separated because it is slow and need to run serially. // Will take around 5 minutes to run on a 4 nodes cluster. 
- ginkgo.It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func(ctx context.Context) { + f.It(f.WithSerial(), f.WithSlow(), "kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func(ctx context.Context) { numNodes, err := e2enode.TotalRegistered(ctx, c) framework.ExpectNoError(err) diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go index 8de061322ede9..fe748a908ebd2 100644 --- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go +++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go @@ -26,6 +26,7 @@ import ( autoscalingv2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling" ) @@ -42,11 +43,11 @@ const ( ) // These tests don't seem to be running properly in parallel: issue: #20338. -var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: CPU)", func() { +var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (scale resource: CPU)", func() { f := framework.NewDefaultFramework("horizontal-pod-autoscaling") f.NamespacePodSecurityLevel = api.LevelBaseline - ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() { + f.Describe(framework.WithSerial(), framework.WithSlow(), "Deployment (Pod Resource)", func() { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, false, f) }) @@ -58,7 +59,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C }) }) - ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() { + f.Describe(framework.WithSerial(), framework.WithSlow(), "Deployment (Container Resource)", func() { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, cpuResource, utilizationMetricType, f) }) @@ -67,7 +68,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C }) }) - ginkgo.Describe("[Serial] [Slow] ReplicaSet", func() { + f.Describe(framework.WithSerial(), framework.WithSlow(), "ReplicaSet", func() { ginkgo.It(titleUp, func(ctx context.Context) { scaleUp(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) }) @@ -77,7 +78,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C }) // These tests take ~20 minutes each. 
- ginkgo.Describe("[Serial] [Slow] ReplicationController", func() { + f.Describe(framework.WithSerial(), framework.WithSlow(), "ReplicationController", func() { ginkgo.It(titleUp+" and verify decision stability", func(ctx context.Context) { scaleUp(ctx, "rc", e2eautoscaling.KindRC, cpuResource, utilizationMetricType, true, f) }) @@ -86,7 +87,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C }) }) - ginkgo.Describe("ReplicationController light", func() { + f.Describe("ReplicationController light", func() { ginkgo.It("Should scale from 1 pod to 2 pods", func(ctx context.Context) { st := &HPAScaleTest{ initPods: 1, @@ -101,7 +102,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C } st.run(ctx, "rc-light", e2eautoscaling.KindRC, f) }) - ginkgo.It("[Slow] Should scale from 2 pods to 1 pod", func(ctx context.Context) { + f.It(f.WithSlow(), "Should scale from 2 pods to 1 pod", func(ctx context.Context) { st := &HPAScaleTest{ initPods: 2, initCPUTotal: 50, @@ -117,7 +118,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C }) }) - ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() { + f.Describe(framework.WithSerial(), framework.WithSlow(), "ReplicaSet with idle sidecar (ContainerResource use case)", func() { // ContainerResource CPU autoscaling on idle sidecar ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func(ctx context.Context) { scaleOnIdleSideCar(ctx, "rs", e2eautoscaling.KindReplicaSet, cpuResource, utilizationMetricType, false, f) @@ -129,7 +130,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C }) }) - ginkgo.Describe("CustomResourceDefinition", func() { + f.Describe("CustomResourceDefinition", func() { ginkgo.It("Should scale with a CRD targetRef", func(ctx context.Context) { scaleTest := &HPAScaleTest{ initPods: 1, @@ -147,11 +148,11 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C }) }) -var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: Memory)", func() { +var _ = SIGDescribe(feature.HPA, "Horizontal pod autoscaling (scale resource: Memory)", func() { f := framework.NewDefaultFramework("horizontal-pod-autoscaling") f.NamespacePodSecurityLevel = api.LevelBaseline - ginkgo.Describe("[Serial] [Slow] Deployment (Pod Resource)", func() { + f.Describe(framework.WithSerial(), framework.WithSlow(), "Deployment (Pod Resource)", func() { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { scaleUp(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, false, f) }) @@ -160,7 +161,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: M }) }) - ginkgo.Describe("[Serial] [Slow] Deployment (Container Resource)", func() { + f.Describe(framework.WithSerial(), framework.WithSlow(), "Deployment (Container Resource)", func() { ginkgo.It(titleUp+titleAverageUtilization, func(ctx context.Context) { scaleUpContainerResource(ctx, "test-deployment", e2eautoscaling.KindDeployment, memResource, utilizationMetricType, f) }) diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go b/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go index d9468e6482b55..518a26604f07d 100644 --- a/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go +++ b/test/e2e/autoscaling/horizontal_pod_autoscaling_behavior.go @@ -21,6 +21,7 @@ import ( 
"time" autoscalingv2 "k8s.io/api/autoscaling/v2" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling" admissionapi "k8s.io/pod-security-admission/api" @@ -29,7 +30,7 @@ import ( "github.com/onsi/gomega" ) -var _ = SIGDescribe("[Feature:HPA] [Serial] [Slow] Horizontal pod autoscaling (non-default behavior)", func() { +var _ = SIGDescribe(feature.HPA, framework.WithSerial(), framework.WithSlow(), "Horizontal pod autoscaling (non-default behavior)", func() { f := framework.NewDefaultFramework("horizontal-pod-autoscaling") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/cloud/gcp/apps/stateful_apps.go b/test/e2e/cloud/gcp/apps/stateful_apps.go index 08e736f52ca90..4c21ee0c3c16f 100644 --- a/test/e2e/cloud/gcp/apps/stateful_apps.go +++ b/test/e2e/cloud/gcp/apps/stateful_apps.go @@ -20,6 +20,7 @@ import ( "context" "k8s.io/kubernetes/test/e2e/cloud/gcp/common" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" "k8s.io/kubernetes/test/e2e/upgrades" @@ -36,7 +37,7 @@ var upgradeTests = []upgrades.Test{ &apps.CassandraUpgradeTest{}, } -var _ = SIGDescribe("stateful Upgrade [Feature:StatefulUpgrade]", func() { +var _ = SIGDescribe("stateful Upgrade", feature.StatefulUpgrade, func() { f := framework.NewDefaultFramework("stateful-upgrade") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests) diff --git a/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go b/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go index 9877bbc96cd23..3060d9c97ffb5 100644 --- a/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go +++ b/test/e2e/cloud/gcp/auth/service_account_admission_controller_migration.go @@ -20,6 +20,7 @@ import ( "context" "k8s.io/kubernetes/test/e2e/cloud/gcp/common" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades/auth" @@ -33,7 +34,7 @@ var upgradeTests = []upgrades.Test{ &auth.ServiceAccountAdmissionControllerMigrationTest{}, } -var _ = SIGDescribe("ServiceAccount admission controller migration [Feature:BoundServiceAccountTokenVolume]", func() { +var _ = SIGDescribe("ServiceAccount admission controller migration", feature.BoundServiceAccountTokenVolume, func() { f := framework.NewDefaultFramework("serviceaccount-admission-controller-migration") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests) diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go index 9a9caf5b4061d..af1e27fbf082b 100644 --- a/test/e2e/cloud/gcp/cluster_upgrade.go +++ b/test/e2e/cloud/gcp/cluster_upgrade.go @@ -20,6 +20,7 @@ import ( "context" "k8s.io/kubernetes/test/e2e/cloud/gcp/common" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades/apps" @@ -52,7 +53,7 @@ var upgradeTests = []upgrades.Test{ &storage.VolumeModeDowngradeTest{}, } -var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { +var _ = SIGDescribe("Upgrade", feature.Upgrade, func() { f := framework.NewDefaultFramework("cluster-upgrade") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged testFrameworks := 
upgrades.CreateUpgradeFrameworks(upgradeTests) @@ -60,7 +61,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { // Create the frameworks here because we can only create them // in a "Describe". ginkgo.Describe("master upgrade", func() { - ginkgo.It("should maintain a functioning cluster [Feature:MasterUpgrade]", func(ctx context.Context) { + f.It("should maintain a functioning cluster", feature.MasterUpgrade, func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -77,7 +78,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { }) ginkgo.Describe("cluster upgrade", func() { - ginkgo.It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func(ctx context.Context) { + f.It("should maintain a functioning cluster", feature.ClusterUpgrade, func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -91,13 +92,13 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() { }) }) -var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() { +var _ = SIGDescribe("Downgrade", feature.Downgrade, func() { f := framework.NewDefaultFramework("cluster-downgrade") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests) ginkgo.Describe("cluster downgrade", func() { - ginkgo.It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func(ctx context.Context) { + f.It("should maintain a functioning cluster", feature.ClusterDowngrade, func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/gke_node_pools.go b/test/e2e/cloud/gcp/gke_node_pools.go index cd7dbbd9cedc0..cb801c14c5017 100644 --- a/test/e2e/cloud/gcp/gke_node_pools.go +++ b/test/e2e/cloud/gcp/gke_node_pools.go @@ -21,6 +21,7 @@ import ( "fmt" "os/exec" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -29,7 +30,7 @@ import ( "github.com/onsi/ginkgo/v2" ) -var _ = SIGDescribe("GKE node pools [Feature:GKENodePool]", func() { +var _ = SIGDescribe("GKE node pools", feature.GKENodePool, func() { f := framework.NewDefaultFramework("node-pools") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -38,7 +39,7 @@ var _ = SIGDescribe("GKE node pools [Feature:GKENodePool]", func() { e2eskipper.SkipUnlessProviderIs("gke") }) - ginkgo.It("should create a cluster with multiple node pools [Feature:GKENodePool]", func(ctx context.Context) { + f.It("should create a cluster with multiple node pools", feature.GKENodePool, func(ctx context.Context) { framework.Logf("Start create node pool test") testCreateDeleteNodePool(ctx, f, "test-pool") }) diff --git a/test/e2e/cloud/gcp/ha_master.go b/test/e2e/cloud/gcp/ha_master.go index 7a8349d7e92fb..09bc5e2467a24 100644 --- a/test/e2e/cloud/gcp/ha_master.go +++ b/test/e2e/cloud/gcp/ha_master.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/common" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -160,7 +161,7 @@ func waitForMasters(ctx context.Context, masterPrefix string, c clientset.Interf return 
fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size) } -var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { +var _ = SIGDescribe("HA-master", feature.HAMaster, func() { f := framework.NewDefaultFramework("ha-master") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var c clientset.Interface @@ -227,7 +228,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { verifyRCs(ctx, c, ns, existingRCs) } - ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func(ctx context.Context) { + f.It("survive addition/removal replicas same zone", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) { zone := framework.TestContext.CloudConfig.Zone step(ctx, None, "") numAdditionalReplicas := 2 @@ -239,7 +240,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { } }) - ginkgo.It("survive addition/removal replicas different zones [Serial][Disruptive]", func(ctx context.Context) { + f.It("survive addition/removal replicas different zones", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) { zone := framework.TestContext.CloudConfig.Zone region := findRegionForZone(zone) zones := findZonesForRegion(region) @@ -257,7 +258,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() { } }) - ginkgo.It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func(ctx context.Context) { + f.It("survive addition/removal replicas multizone workers", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) { zone := framework.TestContext.CloudConfig.Zone region := findRegionForZone(zone) zones := findZonesForRegion(region) diff --git a/test/e2e/cloud/gcp/kubelet_security.go b/test/e2e/cloud/gcp/kubelet_security.go index ea66af55f3383..1b08c0484869d 100644 --- a/test/e2e/cloud/gcp/kubelet_security.go +++ b/test/e2e/cloud/gcp/kubelet_security.go @@ -25,6 +25,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/cluster/ports" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -33,7 +34,7 @@ import ( "github.com/onsi/ginkgo/v2" ) -var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() { +var _ = SIGDescribe("Ports Security Check", feature.KubeletSecurity, func() { f := framework.NewDefaultFramework("kubelet-security") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/cloud/gcp/network/kube_proxy_migration.go b/test/e2e/cloud/gcp/network/kube_proxy_migration.go index 0a7fd727c9490..7322745092978 100644 --- a/test/e2e/cloud/gcp/network/kube_proxy_migration.go +++ b/test/e2e/cloud/gcp/network/kube_proxy_migration.go @@ -21,6 +21,7 @@ import ( "fmt" "k8s.io/kubernetes/test/e2e/cloud/gcp/common" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/upgrades" @@ -45,7 +46,7 @@ func kubeProxyDaemonSetExtraEnvs(enableKubeProxyDaemonSet bool) []string { return []string{fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet)} } -var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]", func() { +var _ = SIGDescribe("kube-proxy migration", feature.KubeProxyDaemonSetMigration, func() { f := framework.NewDefaultFramework("kube-proxy-ds-migration") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged upgradeTestFrameworks := 
upgrades.CreateUpgradeFrameworks(upgradeTests) @@ -56,7 +57,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]" }) ginkgo.Describe("Upgrade kube-proxy from static pods to a DaemonSet", func() { - ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func(ctx context.Context) { + f.It("should maintain a functioning cluster", feature.KubeProxyDaemonSetUpgrade, func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -74,7 +75,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]" }) ginkgo.Describe("Downgrade kube-proxy from a DaemonSet to static pods", func() { - ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func(ctx context.Context) { + f.It("should maintain a functioning cluster", feature.KubeProxyDaemonSetDowngrade, func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/node/gpu.go b/test/e2e/cloud/gcp/node/gpu.go index f6e23c29f0313..e851b4cd79d43 100644 --- a/test/e2e/cloud/gcp/node/gpu.go +++ b/test/e2e/cloud/gcp/node/gpu.go @@ -20,6 +20,7 @@ import ( "context" "k8s.io/kubernetes/test/e2e/cloud/gcp/common" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/upgrades" "k8s.io/kubernetes/test/e2e/upgrades/node" @@ -33,13 +34,13 @@ var upgradeTests = []upgrades.Test{ &node.NvidiaGPUUpgradeTest{}, } -var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { +var _ = SIGDescribe("gpu Upgrade", feature.GPUUpgrade, func() { f := framework.NewDefaultFramework("gpu-upgrade") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests) ginkgo.Describe("master upgrade", func() { - ginkgo.It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func(ctx context.Context) { + f.It("should NOT disrupt gpu pod", feature.GPUMasterUpgrade, func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -52,7 +53,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { }) }) ginkgo.Describe("cluster upgrade", func() { - ginkgo.It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func(ctx context.Context) { + f.It("should be able to run gpu pod after upgrade", feature.GPUClusterUpgrade, func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) @@ -65,7 +66,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() { }) }) ginkgo.Describe("cluster downgrade", func() { - ginkgo.It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func(ctx context.Context) { + f.It("should be able to run gpu pod after downgrade", feature.GPUClusterDowngrade, func(ctx context.Context) { upgCtx, err := common.GetUpgradeContext(f.ClientSet.Discovery()) framework.ExpectNoError(err) diff --git a/test/e2e/cloud/gcp/node_lease.go b/test/e2e/cloud/gcp/node_lease.go index ec75c49371a12..5a85460e91907 100644 --- a/test/e2e/cloud/gcp/node_lease.go +++ b/test/e2e/cloud/gcp/node_lease.go @@ -35,7 +35,7 @@ import ( "github.com/onsi/gomega" ) -var _ = SIGDescribe("[Disruptive]NodeLease", func() { +var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() { f := 
framework.NewDefaultFramework("node-lease-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var systemPodsNo int32 diff --git a/test/e2e/cloud/gcp/reboot.go b/test/e2e/cloud/gcp/reboot.go index 55fea740c7445..d16e26cb3eca5 100644 --- a/test/e2e/cloud/gcp/reboot.go +++ b/test/e2e/cloud/gcp/reboot.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -54,7 +55,7 @@ const ( rebootPodReadyAgainTimeout = 5 * time.Minute ) -var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() { +var _ = SIGDescribe("Reboot", framework.WithDisruptive(), feature.Reboot, func() { var f *framework.Framework ginkgo.BeforeEach(func() { diff --git a/test/e2e/cloud/gcp/recreate_node.go b/test/e2e/cloud/gcp/recreate_node.go index bb5fdd0531156..84ec76f143d3e 100644 --- a/test/e2e/cloud/gcp/recreate_node.go +++ b/test/e2e/cloud/gcp/recreate_node.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -42,7 +43,7 @@ const ( recreateNodeReadyAgainTimeout = 10 * time.Minute ) -var _ = SIGDescribe("Recreate [Feature:Recreate]", func() { +var _ = SIGDescribe("Recreate", feature.Recreate, func() { f := framework.NewDefaultFramework("recreate") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var originalNodes []v1.Node diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go index bf8961ec41a55..916d761fbcd41 100644 --- a/test/e2e/cloud/gcp/resize_nodes.go +++ b/test/e2e/cloud/gcp/resize_nodes.go @@ -44,7 +44,7 @@ func resizeRC(ctx context.Context, c clientset.Interface, ns, name string, repli return err } -var _ = SIGDescribe("Nodes [Disruptive]", func() { +var _ = SIGDescribe("Nodes", framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("resize-nodes") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var systemPodsNo int32 @@ -66,7 +66,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { }) // Slow issue #13323 (8 min) - ginkgo.Describe("Resize [Slow]", func() { + f.Describe("Resize", framework.WithSlow(), func() { var originalNodeCount int32 ginkgo.BeforeEach(func() { diff --git a/test/e2e/cloud/gcp/restart.go b/test/e2e/cloud/gcp/restart.go index 7fd5341b5ba41..ed23660e2f276 100644 --- a/test/e2e/cloud/gcp/restart.go +++ b/test/e2e/cloud/gcp/restart.go @@ -43,7 +43,7 @@ func nodeNames(nodes []v1.Node) []string { return result } -var _ = SIGDescribe("Restart [Disruptive]", func() { +var _ = SIGDescribe("Restart", framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("restart") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ps *testutils.PodStore diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go index 382182ac398d0..edf3d2fc391d7 100644 --- a/test/e2e/cloud/nodes.go +++ b/test/e2e/cloud/nodes.go @@ -24,6 +24,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode 
"k8s.io/kubernetes/test/e2e/framework/node" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -33,7 +34,7 @@ import ( "github.com/onsi/gomega" ) -var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { +var _ = SIGDescribe(feature.CloudProvider, framework.WithDisruptive(), "Nodes", func() { f := framework.NewDefaultFramework("cloudprovider") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var c clientset.Interface diff --git a/test/e2e/common/network/networking.go b/test/e2e/common/network/networking.go index b175157e55eb6..2652143bab514 100644 --- a/test/e2e/common/network/networking.go +++ b/test/e2e/common/network/networking.go @@ -22,6 +22,7 @@ import ( "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" admissionapi "k8s.io/pod-security-admission/api" @@ -81,7 +82,7 @@ var _ = SIGDescribe("Networking", func() { Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes. The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. */ - framework.ConformanceIt("should function for intra-pod communication: http [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should function for intra-pod communication: http", f.WithNodeConformance(), func(ctx context.Context) { config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, false) checkPodToPodConnectivity(ctx, config, "http", e2enetwork.EndpointHTTPPort) }) @@ -92,7 +93,7 @@ var _ = SIGDescribe("Networking", func() { Description: Create a hostexec pod that is capable of curl to netcat commands. Create a test Pod that will act as a webserver front end exposing ports 8080 for tcp and 8081 for udp. The netserver service proxies are created on specified number of nodes. The kubectl exec on the webserver container MUST reach a udp port on the each of service proxy endpoints in the cluster and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. */ - framework.ConformanceIt("should function for intra-pod communication: udp [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should function for intra-pod communication: udp", f.WithNodeConformance(), func(ctx context.Context) { config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, false) checkPodToPodConnectivity(ctx, config, "udp", e2enetwork.EndpointUDPPort) }) @@ -104,7 +105,7 @@ var _ = SIGDescribe("Networking", func() { The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=tcp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. This test is marked LinuxOnly it breaks when using Overlay networking with Windows. 
*/ - framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should function for node-pod communication: http [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, true) for _, endpointPod := range config.EndpointPods { err := config.DialFromNode(ctx, "http", endpointPod.Status.PodIP, e2enetwork.EndpointHTTPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -121,7 +122,7 @@ var _ = SIGDescribe("Networking", func() { The kubectl exec on the webserver container MUST reach a http port on the each of service proxy endpoints in the cluster using a http post(protocol=udp) and the request MUST be successful. Container will execute curl command to reach the service port within specified max retry limit and MUST result in reporting unique hostnames. This test is marked LinuxOnly it breaks when using Overlay networking with Windows. */ - framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should function for node-pod communication: udp [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { config := e2enetwork.NewCoreNetworkingTestConfig(ctx, f, true) for _, endpointPod := range config.EndpointPods { err := config.DialFromNode(ctx, "udp", endpointPod.Status.PodIP, e2enetwork.EndpointUDPPort, config.MaxTries, 0, sets.NewString(endpointPod.Name)) @@ -131,12 +132,12 @@ var _ = SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for intra-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) { + f.It("should function for intra-pod communication: sctp [LinuxOnly]", feature.SCTPConnectivity, func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP) checkPodToPodConnectivity(ctx, config, "sctp", e2enetwork.EndpointSCTPPort) }) - ginkgo.It("should function for node-pod communication: sctp [LinuxOnly][Feature:SCTPConnectivity]", func(ctx context.Context) { + f.It("should function for node-pod communication: sctp [LinuxOnly]", feature.SCTPConnectivity, func(ctx context.Context) { ginkgo.Skip("Skipping SCTP node to pod test until DialFromNode supports SCTP #96482") config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP) for _, endpointPod := range config.EndpointPods { diff --git a/test/e2e/common/node/configmap.go b/test/e2e/common/node/configmap.go index 893ac3a097153..e7fd1713d3a3a 100644 --- a/test/e2e/common/node/configmap.go +++ b/test/e2e/common/node/configmap.go @@ -43,7 +43,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap, from environment field Description: Create a Pod with an environment variable value set using a value from ConfigMap. A ConfigMap value MUST be accessible in the container environment. 
*/ - framework.ConformanceIt("should be consumable via environment variable [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable via environment variable", f.WithNodeConformance(), func(ctx context.Context) { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) @@ -91,7 +91,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap, from environment variables Description: Create a Pod with a environment source from ConfigMap. All ConfigMap values MUST be available as environment variables in the container. */ - framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable via the environment", f.WithNodeConformance(), func(ctx context.Context) { name := "configmap-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go index b7adf1492d2e7..21d8af50677e3 100644 --- a/test/e2e/common/node/container_probe.go +++ b/test/e2e/common/node/container_probe.go @@ -24,18 +24,18 @@ import ( "strings" "time" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/watch" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/kubelet/events" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eevents "k8s.io/kubernetes/test/e2e/framework/events" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -68,7 +68,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod readiness probe, with initial delay Description: Create a Pod that is configured with a initial delay set on the readiness probe. Check the Pod Start time to compare to the initial delay. The Pod MUST be ready only after the specified initial delay. */ - framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("with readiness probe should not be ready before initial delay and never restart", f.WithNodeConformance(), func(ctx context.Context) { containerName := "test-webserver" p := podClient.Create(ctx, testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80)) framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, p.Name, f.Namespace.Name, framework.PodStartTimeout)) @@ -104,7 +104,7 @@ var _ = SIGDescribe("Probing container", func() { Description: Create a Pod with a readiness probe that fails consistently. When this Pod is created, then the Pod MUST never be ready, never be running and restart count MUST be zero. 
*/ - framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("with readiness probe that fails should never be ready and never restart", f.WithNodeConformance(), func(ctx context.Context) { p := podClient.Create(ctx, testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80)) gomega.Consistently(ctx, func() (bool, error) { p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{}) @@ -131,7 +131,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using local file, restart Description: Create a Pod with liveness probe that uses ExecAction handler to cat /temp/health file. The Container deletes the file /temp/health after 10 second, triggering liveness probe to fail. The Pod MUST now be killed and restarted incrementing restart count to 1. */ - framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be restarted with a exec \"cat /tmp/health\" liveness probe", f.WithNodeConformance(), func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"cat", "/tmp/health"}), @@ -148,7 +148,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using local file, no restart Description: Pod is created with liveness probe that uses 'exec' command to cat /temp/health file. Liveness probe MUST not fail to check health and the restart count should remain 0. */ - framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe", f.WithNodeConformance(), func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"cat", "/tmp/health"}), @@ -165,7 +165,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using http endpoint, restart Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. */ - framework.ConformanceIt("should be restarted with a /healthz http liveness probe [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be restarted with a /healthz http liveness probe", f.WithNodeConformance(), func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/healthz", 8080), InitialDelaySeconds: 15, @@ -180,7 +180,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using tcp socket, no restart Description: A Pod is created with liveness probe on tcp socket 8080. The http handler on port 8080 will return http errors after 10 seconds, but the socket will remain open. Liveness probe MUST not fail to check health and the restart count should remain 0. 
*/ - framework.ConformanceIt("should *not* be restarted with a tcp:8080 liveness probe [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should *not* be restarted with a tcp:8080 liveness probe", f.WithNodeConformance(), func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: tcpSocketHandler(8080), InitialDelaySeconds: 15, @@ -195,7 +195,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using http endpoint, multiple restarts (slow) Description: A Pod is created with liveness probe on http endpoint /healthz. The http handler on the /healthz will return a http error after 10 seconds since the Pod is started. This MUST result in liveness check failure. The Pod MUST now be killed and restarted incrementing restart count to 1. The liveness probe must fail again after restart once the http handler for /healthz enpoind on the Pod returns an http error after 10 seconds from the start. Restart counts MUST increment every time health check fails, measure up to 5 restart. */ - framework.ConformanceIt("should have monotonically increasing restart count [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should have monotonically increasing restart count", f.WithNodeConformance(), func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/healthz", 8080), InitialDelaySeconds: 5, @@ -211,7 +211,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using http endpoint, failure Description: A Pod is created with liveness probe on http endpoint '/'. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero. */ - framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should *not* be restarted with a /healthz http liveness probe", f.WithNodeConformance(), func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: httpGetHandler("/", 80), InitialDelaySeconds: 15, @@ -227,7 +227,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, container exec timeout, restart Description: A Pod is created with liveness probe with a Exec action on the Pod. If the liveness probe call does not return within the timeout specified, liveness probe MUST restart the Pod. */ - ginkgo.It("should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func(ctx context.Context) { + f.It("should be restarted with an exec liveness probe with timeout [MinimumKubeletVersion:1.20]", f.WithNodeConformance(), func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 600"} livenessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"/bin/sh", "-c", "sleep 10"}), @@ -244,7 +244,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod readiness probe, container exec timeout, not ready Description: A Pod is created with readiness probe with a Exec action on the Pod. If the readiness probe call does not return within the timeout specified, readiness probe MUST not be Ready. 
*/ - ginkgo.It("should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20] [NodeConformance]", func(ctx context.Context) { + f.It("should not be ready with an exec readiness probe timeout [MinimumKubeletVersion:1.20]", f.WithNodeConformance(), func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 600"} readinessProbe := &v1.Probe{ ProbeHandler: execHandler([]string{"/bin/sh", "-c", "sleep 10"}), @@ -459,7 +459,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Set terminationGracePeriodSeconds for livenessProbe Description: A pod with a long terminationGracePeriod is created with a shorter livenessProbe-level terminationGracePeriodSeconds. We confirm the shorter termination period is used. */ - ginkgo.It("should override timeoutGracePeriodSeconds when LivenessProbe field is set [NodeConformance]", func(ctx context.Context) { + f.It("should override timeoutGracePeriodSeconds when LivenessProbe field is set", f.WithNodeConformance(), func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 1000"} // probe will fail since pod has no http endpoints shortGracePeriod := int64(5) @@ -489,7 +489,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Set terminationGracePeriodSeconds for startupProbe Description: A pod with a long terminationGracePeriod is created with a shorter startupProbe-level terminationGracePeriodSeconds. We confirm the shorter termination period is used. */ - ginkgo.It("should override timeoutGracePeriodSeconds when StartupProbe field is set [NodeConformance]", func(ctx context.Context) { + f.It("should override timeoutGracePeriodSeconds when StartupProbe field is set", f.WithNodeConformance(), func(ctx context.Context) { cmd := []string{"/bin/sh", "-c", "sleep 1000"} // probe will fail since pod has no http endpoints livenessProbe := &v1.Probe{ @@ -524,7 +524,7 @@ var _ = SIGDescribe("Probing container", func() { Testname: Pod liveness probe, using grpc call, success Description: A Pod is created with liveness probe on grpc service. Liveness probe on this endpoint will not fail. When liveness probe does not fail then the restart count MUST remain zero. */ - framework.ConformanceIt("should *not* be restarted with a GRPC liveness probe [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should *not* be restarted with a GRPC liveness probe", f.WithNodeConformance(), func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: v1.ProbeHandler{ GRPC: &v1.GRPCAction{ @@ -547,7 +547,7 @@ var _ = SIGDescribe("Probing container", func() { Description: A Pod is created with liveness probe on grpc service. Liveness probe on this endpoint should fail because of wrong probe port. When liveness probe does fail then the restart count should +1. 
*/ - framework.ConformanceIt("should be restarted with a GRPC liveness probe [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be restarted with a GRPC liveness probe", f.WithNodeConformance(), func(ctx context.Context) { livenessProbe := &v1.Probe{ ProbeHandler: v1.ProbeHandler{ GRPC: &v1.GRPCAction{ @@ -729,7 +729,7 @@ done }) }) -var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContainers] Probing restartable init container", func() { +var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers]", feature.SidecarContainers, "Probing restartable init container", func() { f := framework.NewDefaultFramework("container-probe") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline var podClient *e2epod.PodClient diff --git a/test/e2e/common/node/containers.go b/test/e2e/common/node/containers.go index 33d54b128c527..0df79bfdc813e 100644 --- a/test/e2e/common/node/containers.go +++ b/test/e2e/common/node/containers.go @@ -38,7 +38,7 @@ var _ = SIGDescribe("Containers", func() { Testname: Containers, without command and arguments Description: Default command and arguments from the container image entrypoint MUST be used when Pod does not specify the container command */ - framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should use the image defaults if command and args are blank", f.WithNodeConformance(), func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name) pod.Spec.Containers[0].Args = nil pod = e2epod.NewPodClient(f).Create(ctx, pod) @@ -58,7 +58,7 @@ var _ = SIGDescribe("Containers", func() { Testname: Containers, with arguments Description: Default command and from the container image entrypoint MUST be used when Pod does not specify the container command but the arguments from Pod spec MUST override when specified. */ - framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be able to override the image's default arguments (container cmd)", f.WithNodeConformance(), func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments") e2epodoutput.TestContainerOutput(ctx, f, "override arguments", pod, 0, []string{ "[/agnhost entrypoint-tester override arguments]", @@ -72,7 +72,7 @@ var _ = SIGDescribe("Containers", func() { Testname: Containers, with command Description: Default command from the container image entrypoint MUST NOT be used when Pod specifies the container command. Command from Pod spec MUST override the command in the image. */ - framework.ConformanceIt("should be able to override the image's default command (container entrypoint) [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be able to override the image's default command (container entrypoint)", f.WithNodeConformance(), func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester") pod.Spec.Containers[0].Command = []string{"/agnhost-2"} @@ -86,7 +86,7 @@ var _ = SIGDescribe("Containers", func() { Testname: Containers, with command and arguments Description: Default command and arguments from the container image entrypoint MUST NOT be used when Pod specifies the container command and arguments. Command and arguments from Pod spec MUST override the command and arguments in the image. 
*/ - framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be able to override the image's default command and arguments", f.WithNodeConformance(), func(ctx context.Context) { pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments") pod.Spec.Containers[0].Command = []string{"/agnhost-2"} diff --git a/test/e2e/common/node/downwardapi.go b/test/e2e/common/node/downwardapi.go index 83ae5395e43fa..a2f7c71a37331 100644 --- a/test/e2e/common/node/downwardapi.go +++ b/test/e2e/common/node/downwardapi.go @@ -27,6 +27,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -42,7 +43,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for name, namespace and ip Description: Downward API MUST expose Pod and Container fields as environment variables. Specify Pod Name, namespace and IP as environment variable in the Pod Spec are visible at runtime in the container. */ - framework.ConformanceIt("should provide pod name, namespace and IP address as env vars [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide pod name, namespace and IP address as env vars", f.WithNodeConformance(), func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -88,7 +89,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for host ip Description: Downward API MUST expose Pod and Container fields as environment variables. Specify host IP as environment variable in the Pod Spec are visible at runtime in the container. */ - framework.ConformanceIt("should provide host IP as an env var [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide host IP as an env var", f.WithNodeConformance(), func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -164,7 +165,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for CPU and memory limits and requests Description: Downward API MUST expose CPU request and Memory request set through environment variables at runtime in the container. */ - framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's limits.cpu/memory and requests.cpu/memory as env vars", f.WithNodeConformance(), func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -215,7 +216,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for default CPU and memory limits and requests Description: Downward API MUST expose CPU request and Memory limits set through environment variables at runtime in the container. 
*/ - framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide default limits.cpu/memory from node allocatable", f.WithNodeConformance(), func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -265,7 +266,7 @@ var _ = SIGDescribe("Downward API", func() { Testname: DownwardAPI, environment for Pod UID Description: Downward API MUST expose Pod UID set through environment variables at runtime in the container. */ - framework.ConformanceIt("should provide pod UID as env vars [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide pod UID as env vars", f.WithNodeConformance(), func(ctx context.Context) { podName := "downward-api-" + string(uuid.NewUUID()) env := []v1.EnvVar{ { @@ -287,7 +288,7 @@ var _ = SIGDescribe("Downward API", func() { }) }) -var _ = SIGDescribe("Downward API [Serial] [Disruptive] [NodeFeature:DownwardAPIHugePages]", func() { +var _ = SIGDescribe("Downward API", framework.WithSerial(), framework.WithDisruptive(), nodefeature.DownwardAPIHugePages, func() { f := framework.NewDefaultFramework("downward-api") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/common/node/ephemeral_containers.go b/test/e2e/common/node/ephemeral_containers.go index 563f84c58bd43..094a8924e6f46 100644 --- a/test/e2e/common/node/ephemeral_containers.go +++ b/test/e2e/common/node/ephemeral_containers.go @@ -37,7 +37,7 @@ import ( "github.com/onsi/gomega" ) -var _ = SIGDescribe("Ephemeral Containers [NodeConformance]", func() { +var _ = SIGDescribe("Ephemeral Containers", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("ephemeral-containers-test") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline var podClient *e2epod.PodClient diff --git a/test/e2e/common/node/expansion.go b/test/e2e/common/node/expansion.go index f545164cb05c5..a24f48b78576d 100644 --- a/test/e2e/common/node/expansion.go +++ b/test/e2e/common/node/expansion.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("Variable Expansion", func() { Testname: Environment variables, expansion Description: Create a Pod with environment variables. Environment variables defined using previously defined environment variables MUST expand to proper values. */ - framework.ConformanceIt("should allow composing env vars into new env vars [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should allow composing env vars into new env vars", f.WithNodeConformance(), func(ctx context.Context) { envVars := []v1.EnvVar{ { Name: "FOO", @@ -73,7 +73,7 @@ var _ = SIGDescribe("Variable Expansion", func() { Testname: Environment variables, command expansion Description: Create a Pod with environment variables and container command using them. Container command using the defined environment variables MUST expand to proper values. */ - framework.ConformanceIt("should allow substituting values in a container's command [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should allow substituting values in a container's command", f.WithNodeConformance(), func(ctx context.Context) { envVars := []v1.EnvVar{ { Name: "TEST_VAR", @@ -92,7 +92,7 @@ var _ = SIGDescribe("Variable Expansion", func() { Testname: Environment variables, command argument expansion Description: Create a Pod with environment variables and container command arguments using them. 
Container command arguments using the defined environment variables MUST expand to proper values. */ - framework.ConformanceIt("should allow substituting values in a container's args [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should allow substituting values in a container's args", f.WithNodeConformance(), func(ctx context.Context) { envVars := []v1.EnvVar{ { Name: "TEST_VAR", @@ -152,7 +152,7 @@ var _ = SIGDescribe("Variable Expansion", func() { Testname: VolumeSubpathEnvExpansion, subpath with backticks Description: Make sure a container's subpath can not be set using an expansion of environment variables when backticks are supplied. */ - framework.ConformanceIt("should fail substituting values in a volume subpath with backticks [Slow]", func(ctx context.Context) { + framework.ConformanceIt("should fail substituting values in a volume subpath with backticks", f.WithSlow(), func(ctx context.Context) { envVars := []v1.EnvVar{ { @@ -186,7 +186,7 @@ var _ = SIGDescribe("Variable Expansion", func() { Testname: VolumeSubpathEnvExpansion, subpath with absolute path Description: Make sure a container's subpath can not be set using an expansion of environment variables when absolute path is supplied. */ - framework.ConformanceIt("should fail substituting values in a volume subpath with absolute path [Slow]", func(ctx context.Context) { + framework.ConformanceIt("should fail substituting values in a volume subpath with absolute path", f.WithSlow(), func(ctx context.Context) { absolutePath := "/tmp" if framework.NodeOSDistroIs("windows") { // Windows does not typically have a C:\tmp folder. @@ -225,7 +225,7 @@ var _ = SIGDescribe("Variable Expansion", func() { Testname: VolumeSubpathEnvExpansion, subpath ready from failed state Description: Verify that a failing subpath expansion can be modified during the lifecycle of a container. */ - framework.ConformanceIt("should verify that a failing subpath expansion can be modified during the lifecycle of a container [Slow]", func(ctx context.Context) { + framework.ConformanceIt("should verify that a failing subpath expansion can be modified during the lifecycle of a container", f.WithSlow(), func(ctx context.Context) { envVars := []v1.EnvVar{ { @@ -297,7 +297,7 @@ var _ = SIGDescribe("Variable Expansion", func() { 3. 
successful expansion of the subpathexpr isn't required for volume cleanup */ - framework.ConformanceIt("should succeed in writing subpaths in container [Slow]", func(ctx context.Context) { + framework.ConformanceIt("should succeed in writing subpaths in container", f.WithSlow(), func(ctx context.Context) { envVars := []v1.EnvVar{ { diff --git a/test/e2e/common/node/image_credential_provider.go b/test/e2e/common/node/image_credential_provider.go index 662f33819d0b3..52f2d82a9a5aa 100644 --- a/test/e2e/common/node/image_credential_provider.go +++ b/test/e2e/common/node/image_credential_provider.go @@ -24,13 +24,14 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" ) -var _ = SIGDescribe("ImageCredentialProvider [Feature:KubeletCredentialProviders]", func() { +var _ = SIGDescribe("ImageCredentialProvider", feature.KubeletCredentialProviders, func() { f := framework.NewDefaultFramework("image-credential-provider") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var podClient *e2epod.PodClient diff --git a/test/e2e/common/node/init_container.go b/test/e2e/common/node/init_container.go index 0086defa3b83d..828cfc8b41598 100644 --- a/test/e2e/common/node/init_container.go +++ b/test/e2e/common/node/init_container.go @@ -159,7 +159,7 @@ func initContainersInvariants(pod *v1.Pod) error { return nil } -var _ = SIGDescribe("InitContainer [NodeConformance]", func() { +var _ = SIGDescribe("InitContainer", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("init-container") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline var podClient *e2epod.PodClient diff --git a/test/e2e/common/node/kubelet.go b/test/e2e/common/node/kubelet.go index ec50dd437cc28..c4f1c8b9c15af 100644 --- a/test/e2e/common/node/kubelet.go +++ b/test/e2e/common/node/kubelet.go @@ -48,7 +48,7 @@ var _ = SIGDescribe("Kubelet", func() { Testname: Kubelet, log output, default Description: By default the stdout and stderr from the process being executed in a pod MUST be sent to the pod's logs. */ - framework.ConformanceIt("should print the output to logs [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should print the output to logs", f.WithNodeConformance(), func(ctx context.Context) { podClient.CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -106,7 +106,7 @@ var _ = SIGDescribe("Kubelet", func() { Testname: Kubelet, failed pod, terminated reason Description: Create a Pod with terminated state. Pod MUST have only one container. Container MUST be in terminated state and MUST have an terminated reason. */ - framework.ConformanceIt("should have an terminated reason [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should have an terminated reason", f.WithNodeConformance(), func(ctx context.Context) { gomega.Eventually(ctx, func() error { podData, err := podClient.Get(ctx, podName, metav1.GetOptions{}) if err != nil { @@ -131,7 +131,7 @@ var _ = SIGDescribe("Kubelet", func() { Testname: Kubelet, failed pod, delete Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted. 
*/ - framework.ConformanceIt("should be possible to delete [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be possible to delete", f.WithNodeConformance(), func(ctx context.Context) { err := podClient.Delete(ctx, podName, metav1.DeleteOptions{}) framework.ExpectNoError(err, "deleting Pod") }) @@ -144,7 +144,7 @@ var _ = SIGDescribe("Kubelet", func() { Testname: Kubelet, hostAliases Description: Create a Pod with hostAliases and a container with command to output /etc/hosts entries. Pod's logs MUST have matching entries of specified hostAliases to the output of /etc/hosts entries. */ - framework.ConformanceIt("should write entries to /etc/hosts [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should write entries to /etc/hosts", f.WithNodeConformance(), func(ctx context.Context) { pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, nil, "etc-hosts") // Don't restart the Pod since it is expected to exit pod.Spec.RestartPolicy = v1.RestartPolicyNever @@ -180,7 +180,7 @@ var _ = SIGDescribe("Kubelet", func() { Description: Create a Pod with security context set with ReadOnlyRootFileSystem set to true. The Pod then tries to write to the /file on the root, write operation to the root filesystem MUST fail as expected. This test is marked LinuxOnly since Windows does not support creating containers with read-only access. */ - framework.ConformanceIt("should not write to root filesystem [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should not write to root filesystem [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { isReadOnly := true podClient.CreateSync(ctx, &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/e2e/common/node/kubelet_etc_hosts.go b/test/e2e/common/node/kubelet_etc_hosts.go index dcdf0e3449669..3b783ddcd34b3 100644 --- a/test/e2e/common/node/kubelet_etc_hosts.go +++ b/test/e2e/common/node/kubelet_etc_hosts.go @@ -61,7 +61,7 @@ var _ = SIGDescribe("KubeletManagedEtcHosts", func() { 3. The Pod with hostNetwork=true , /etc/hosts file MUST not be managed by the Kubelet. This test is marked LinuxOnly since Windows cannot mount individual files in Containers. */ - framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should test kubelet managed /etc/hosts file [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("Setting up the test") config.setup(ctx) diff --git a/test/e2e/common/node/lifecycle_hook.go b/test/e2e/common/node/lifecycle_hook.go index e423ef5c106f5..a6d29c2b9e396 100644 --- a/test/e2e/common/node/lifecycle_hook.go +++ b/test/e2e/common/node/lifecycle_hook.go @@ -25,6 +25,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -132,7 +133,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { Testname: Pod Lifecycle, post start exec hook Description: When a post start handler is specified in the container lifecycle using a 'Exec' action, then the handler MUST be invoked after the start of the container. 
A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a post start that invokes the server pod using ExecAction to validate that the post start is executed. */ - framework.ConformanceIt("should execute poststart exec hook properly [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should execute poststart exec hook properly", f.WithNodeConformance(), func(ctx context.Context) { lifecycle := &v1.Lifecycle{ PostStart: &v1.LifecycleHandler{ Exec: &v1.ExecAction{ @@ -149,7 +150,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { Testname: Pod Lifecycle, prestop exec hook Description: When a pre-stop handler is specified in the container lifecycle using a 'Exec' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve http requests, create a second pod with a container lifecycle specifying a pre-stop that invokes the server pod using ExecAction to validate that the pre-stop is executed. */ - framework.ConformanceIt("should execute prestop exec hook properly [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should execute prestop exec hook properly", f.WithNodeConformance(), func(ctx context.Context) { lifecycle := &v1.Lifecycle{ PreStop: &v1.LifecycleHandler{ Exec: &v1.ExecAction{ @@ -165,7 +166,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { Testname: Pod Lifecycle, post start http hook Description: When a post start handler is specified in the container lifecycle using a HttpGet action, then the handler MUST be invoked after the start of the container. A server pod is created that will serve http requests, create a second pod on the same node with a container lifecycle specifying a post start that invokes the server pod to validate that the post start is executed. */ - framework.ConformanceIt("should execute poststart http hook properly [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should execute poststart http hook properly", f.WithNodeConformance(), func(ctx context.Context) { lifecycle := &v1.Lifecycle{ PostStart: &v1.LifecycleHandler{ HTTPGet: &v1.HTTPGetAction{ @@ -187,7 +188,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { Testname: Pod Lifecycle, poststart https hook Description: When a post-start handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve https requests, create a second pod on the same node with a container lifecycle specifying a post-start that invokes the server pod to validate that the post-start is executed. */ - ginkgo.It("should execute poststart https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func(ctx context.Context) { + f.It("should execute poststart https hook properly [MinimumKubeletVersion:1.23]", f.WithNodeConformance(), func(ctx context.Context) { lifecycle := &v1.Lifecycle{ PostStart: &v1.LifecycleHandler{ HTTPGet: &v1.HTTPGetAction{ @@ -210,7 +211,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { Testname: Pod Lifecycle, prestop http hook Description: When a pre-stop handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked before the container is terminated. 
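
As the two SIGDescribe rewrites above show, the registration helpers now take a variadic mix of name fragments and label values: plain strings still contribute literal text to the test name (including free-form tags such as "[NodeAlphaFeature:SidecarContainers]"), while values like feature.SidecarContainers or feature.PodLifecycleSleepAction are structured labels. A minimal sketch of the calling convention, with invented names:

    // Sketch only; "Gadget lifecycle" and feature.Example are made up to show
    // how strings and labels interleave in the variadic argument list.
    var _ = SIGDescribe("Gadget lifecycle", feature.Example, "ordering", func() {
        f := framework.NewDefaultFramework("gadget-lifecycle")
        ginkgo.It("keeps working without any label", func(ctx context.Context) {
            // untagged tests can stay on plain ginkgo.It, as elsewhere in this patch
        })
    })

Presumably the label values also surface in the rendered test name (for example as a bracketed tag) so that existing name-based focus and skip filters keep matching, but that behaviour lives in the framework package and is not visible in this hunk.
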
A server pod is created that will serve http requests, create a second pod on the same node with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed. */ - framework.ConformanceIt("should execute prestop http hook properly [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should execute prestop http hook properly", f.WithNodeConformance(), func(ctx context.Context) { lifecycle := &v1.Lifecycle{ PreStop: &v1.LifecycleHandler{ HTTPGet: &v1.HTTPGetAction{ @@ -232,7 +233,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { Testname: Pod Lifecycle, prestop https hook Description: When a pre-stop handler is specified in the container lifecycle using a 'HttpGet' action, then the handler MUST be invoked before the container is terminated. A server pod is created that will serve https requests, create a second pod on the same node with a container lifecycle specifying a pre-stop that invokes the server pod to validate that the pre-stop is executed. */ - ginkgo.It("should execute prestop https hook properly [MinimumKubeletVersion:1.23] [NodeConformance]", func(ctx context.Context) { + f.It("should execute prestop https hook properly [MinimumKubeletVersion:1.23]", f.WithNodeConformance(), func(ctx context.Context) { lifecycle := &v1.Lifecycle{ PreStop: &v1.LifecycleHandler{ HTTPGet: &v1.HTTPGetAction{ @@ -253,7 +254,7 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() { }) }) -var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContainers] Restartable Init Container Lifecycle Hook", func() { +var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers]", feature.SidecarContainers, "Restartable Init Container Lifecycle Hook", func() { f := framework.NewDefaultFramework("restartable-init-container-lifecycle-hook") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline var podClient *e2epod.PodClient @@ -545,7 +546,7 @@ func getSidecarPodWithHook(name string, image string, lifecycle *v1.Lifecycle) * } } -var _ = SIGDescribe("[Feature:PodLifecycleSleepAction]", func() { +var _ = SIGDescribe(feature.PodLifecycleSleepAction, func() { f := framework.NewDefaultFramework("pod-lifecycle-sleep-action") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline var podClient *e2epod.PodClient diff --git a/test/e2e/common/node/pod_admission.go b/test/e2e/common/node/pod_admission.go index f8b0e5ceafb25..1bec24dd0b5f7 100644 --- a/test/e2e/common/node/pod_admission.go +++ b/test/e2e/common/node/pod_admission.go @@ -32,7 +32,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = SIGDescribe("PodOSRejection [NodeConformance]", func() { +var _ = SIGDescribe("PodOSRejection", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("pod-os-rejection") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline ginkgo.Context("Kubelet", func() { diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go index da6289061a9d2..c9d33906fc905 100644 --- a/test/e2e/common/node/pods.go +++ b/test/e2e/common/node/pods.go @@ -202,7 +202,7 @@ var _ = SIGDescribe("Pods", func() { Testname: Pods, assigned hostip Description: Create a Pod. Pod status MUST return successfully and contains a valid IP address. 
*/ - framework.ConformanceIt("should get a host IP [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should get a host IP", f.WithNodeConformance(), func(ctx context.Context) { name := "pod-hostip-" + string(uuid.NewUUID()) testHostIP(ctx, podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -224,7 +224,7 @@ var _ = SIGDescribe("Pods", func() { Testname: Pods, lifecycle Description: A Pod is created with a unique label. Pod MUST be accessible when queried using the label selector upon creation. Add a watch, check if the Pod is running. Pod then deleted, The pod deletion timestamp is observed. The watch MUST return the pod deleted event. Query with the original selector for the Pod MUST return empty list. */ - framework.ConformanceIt("should be submitted and removed [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be submitted and removed", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("creating the pod") name := "pod-submit-remove-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -342,7 +342,7 @@ var _ = SIGDescribe("Pods", func() { Testname: Pods, update Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. Update the pod to change the value of the Label. Query for the Pod with the new value for the label MUST be successful. */ - framework.ConformanceIt("should be updated [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be updated", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("creating the pod") name := "pod-update-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -396,7 +396,7 @@ var _ = SIGDescribe("Pods", func() { Testname: Pods, ActiveDeadlineSeconds Description: Create a Pod with a unique label. Query for the Pod with the label as selector MUST be successful. The Pod is updated with ActiveDeadlineSeconds set on the Pod spec. Pod MUST terminate of the specified time elapses. */ - framework.ConformanceIt("should allow activeDeadlineSeconds to be updated [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should allow activeDeadlineSeconds to be updated", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("creating the pod") name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID()) value := strconv.Itoa(time.Now().Nanosecond()) @@ -442,7 +442,7 @@ var _ = SIGDescribe("Pods", func() { Testname: Pods, service environment variables Description: Create a server Pod listening on port 9376. A Service called fooservice is created for the server Pod listening on port 8765 targeting port 8080. If a new Pod is created in the cluster then the Pod MUST have the fooservice environment variables available from this new Pod. The new create Pod MUST have environment variables such as FOOSERVICE_SERVICE_HOST, FOOSERVICE_SERVICE_PORT, FOOSERVICE_PORT, FOOSERVICE_PORT_8765_TCP_PORT, FOOSERVICE_PORT_8765_TCP_PROTO, FOOSERVICE_PORT_8765_TCP and FOOSERVICE_PORT_8765_TCP_ADDR that are populated with proper values. */ - framework.ConformanceIt("should contain environment variables for services [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should contain environment variables for services", f.WithNodeConformance(), func(ctx context.Context) { // Make a pod that will be a service. // This pod serves its hostname via HTTP. 
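
A mechanical rule is visible in these hunks: framework.ConformanceIt calls simply gain extra label arguments, whereas a plain ginkgo.It that needs a label is rewritten to f.It, presumably because the framework wrapper is what knows how to interpret the label values. Where a test carried several tags, the labels are listed one after another. A compressed sketch of the resulting shape (the test text is illustrative, not taken from this patch):

    // Inside a SIGDescribe body, with f := framework.NewDefaultFramework(...):
    f.It("restarts the container with increasing back-off", f.WithSlow(), f.WithNodeConformance(),
        func(ctx context.Context) {
            // slow, node-conformance test body
        })
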
serverName := "server-envvars-" + string(uuid.NewUUID()) @@ -534,7 +534,7 @@ var _ = SIGDescribe("Pods", func() { Description: A Pod is created. Websocket is created to retrieve exec command output from this pod. Message retrieved form Websocket MUST match with expected exec command output. */ - framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support remote command execution over websockets", f.WithNodeConformance(), func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "unable to get base config") @@ -616,7 +616,7 @@ var _ = SIGDescribe("Pods", func() { Description: A Pod is created. Websocket is created to retrieve log of a container from this pod. Message retrieved form Websocket MUST match with container's output. */ - framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support retrieving logs from the container over websockets", f.WithNodeConformance(), func(ctx context.Context) { config, err := framework.LoadConfig() framework.ExpectNoError(err, "unable to get base config") @@ -674,7 +674,7 @@ var _ = SIGDescribe("Pods", func() { }) // Slow (~7 mins) - ginkgo.It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func(ctx context.Context) { + f.It("should have their auto-restart back-off timer reset on image update", f.WithSlow(), f.WithNodeConformance(), func(ctx context.Context) { podName := "pod-back-off-image" containerName := "back-off" pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ @@ -715,7 +715,7 @@ var _ = SIGDescribe("Pods", func() { }) // Slow by design (~27 mins) issue #19027 - ginkgo.It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func(ctx context.Context) { + f.It("should cap back-off at MaxContainerBackOff", f.WithSlow(), f.WithNodeConformance(), func(ctx context.Context) { podName := "back-off-cap" containerName := "back-off-cap" pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{ @@ -769,7 +769,7 @@ var _ = SIGDescribe("Pods", func() { } }) - ginkgo.It("should support pod readiness gates [NodeConformance]", func(ctx context.Context) { + f.It("should support pod readiness gates", f.WithNodeConformance(), func(ctx context.Context) { podName := "pod-ready" readinessGate1 := "k8s.io/test-condition1" readinessGate2 := "k8s.io/test-condition2" diff --git a/test/e2e/common/node/privileged.go b/test/e2e/common/node/privileged.go index 0769047acd9a6..c382f3049d2b9 100644 --- a/test/e2e/common/node/privileged.go +++ b/test/e2e/common/node/privileged.go @@ -42,7 +42,7 @@ type PrivilegedPodTestConfig struct { pod *v1.Pod } -var _ = SIGDescribe("PrivilegedPod [NodeConformance]", func() { +var _ = SIGDescribe("PrivilegedPod", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("e2e-privileged-pod") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged config := &PrivilegedPodTestConfig{ diff --git a/test/e2e/common/node/runtime.go b/test/e2e/common/node/runtime.go index e47e791493f5c..029ee900ea289 100644 --- a/test/e2e/common/node/runtime.go +++ b/test/e2e/common/node/runtime.go @@ -49,7 +49,7 @@ var _ = SIGDescribe("Container Runtime", func() { Testname: Container Runtime, Restart Policy, Pod Phases Description: If the restart policy is set to 'Always', Pod MUST be restarted when terminated, If restart 
policy is 'OnFailure', Pod MUST be started only if it is terminated with non-zero exit code. If the restart policy is 'Never', Pod MUST never be restarted. All these three test cases MUST verify the restart counts accordingly. */ - framework.ConformanceIt("should run with the expected status [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should run with the expected status", f.WithNodeConformance(), func(ctx context.Context) { restartCountVolumeName := "restart-count" restartCountVolumePath := "/restart-count" testContainer := v1.Container{ @@ -127,7 +127,7 @@ while true; do sleep 1; done ginkgo.By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name)) gomega.Expect(GetContainerState(status.State)).To(gomega.Equal(testCase.State)) - ginkgo.By(fmt.Sprintf("Container '%s': should be possible to delete [NodeConformance]", testContainer.Name)) + ginkgo.By(fmt.Sprintf("Container '%s': should be possible to delete", testContainer.Name)) gomega.Expect(terminateContainer.Delete(ctx)).To(gomega.Succeed()) gomega.Eventually(ctx, terminateContainer.Present, ContainerStatusRetryTimeout, ContainerStatusPollInterval).Should(gomega.BeFalse()) } @@ -171,7 +171,7 @@ while true; do sleep 1; done gomega.Expect(c.Delete(ctx)).To(gomega.Succeed()) } - ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func(ctx context.Context) { + f.It("should report termination message if TerminationMessagePath is set", f.WithNodeConformance(), func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -192,7 +192,7 @@ while true; do sleep 1; done Testname: Container Runtime, TerminationMessagePath, non-root user and non-default path Description: Create a pod with a container to run it as a non-root user with a custom TerminationMessagePath set. Pod redirects the output to the provided path successfully. When the container is terminated, the termination message MUST match the expected output logged in the provided custom path. */ - framework.ConformanceIt("should report termination message if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should report termination message if TerminationMessagePath is set as non-root user and at a non-default path", f.WithNodeConformance(), func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -213,7 +213,7 @@ while true; do sleep 1; done Testname: Container Runtime, TerminationMessage, from container's log output of failing container Description: Create a pod with an container. Container's output is recorded in log and container exits with an error. When container is terminated, termination message MUST match the expected output recorded from container's log. 
*/ - framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set", f.WithNodeConformance(), func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -229,7 +229,7 @@ while true; do sleep 1; done Testname: Container Runtime, TerminationMessage, from log output of succeeding container Description: Create a pod with an container. Container's output is recorded in log and container exits successfully without an error. When container is terminated, terminationMessage MUST have no content as container succeed. */ - framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set", f.WithNodeConformance(), func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -245,7 +245,7 @@ while true; do sleep 1; done Testname: Container Runtime, TerminationMessage, from file of succeeding container Description: Create a pod with an container. Container's output is recorded in a file and the container exits successfully without an error. When container is terminated, terminationMessage MUST match with the content from file. */ - framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set", f.WithNodeConformance(), func(ctx context.Context) { container := v1.Container{ Image: framework.BusyBoxImage, Command: []string{"/bin/sh", "-c"}, @@ -368,23 +368,23 @@ while true; do sleep 1; done } } - ginkgo.It("should not be able to pull image from invalid registry [NodeConformance]", func(ctx context.Context) { + f.It("should not be able to pull image from invalid registry", f.WithNodeConformance(), func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.InvalidRegistryImage) imagePullTest(ctx, image, false, v1.PodPending, true, false) }) - ginkgo.It("should be able to pull image [NodeConformance]", func(ctx context.Context) { + f.It("should be able to pull image", f.WithNodeConformance(), func(ctx context.Context) { // NOTE(claudiub): The agnhost image is supposed to work on both Linux and Windows. 
image := imageutils.GetE2EImage(imageutils.Agnhost) imagePullTest(ctx, image, false, v1.PodRunning, false, false) }) - ginkgo.It("should not be able to pull from private registry without secret [NodeConformance]", func(ctx context.Context) { + f.It("should not be able to pull from private registry without secret", f.WithNodeConformance(), func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine) imagePullTest(ctx, image, false, v1.PodPending, true, false) }) - ginkgo.It("should be able to pull from private registry with secret [NodeConformance]", func(ctx context.Context) { + f.It("should be able to pull from private registry with secret", f.WithNodeConformance(), func(ctx context.Context) { image := imageutils.GetE2EImage(imageutils.AuthenticatedAlpine) isWindows := false if framework.NodeOSDistroIs("windows") { diff --git a/test/e2e/common/node/runtimeclass.go b/test/e2e/common/node/runtimeclass.go index a6c6447ee18bf..22e8c9311a3dd 100644 --- a/test/e2e/common/node/runtimeclass.go +++ b/test/e2e/common/node/runtimeclass.go @@ -38,6 +38,7 @@ import ( e2eruntimeclass "k8s.io/kubernetes/test/e2e/framework/node/runtimeclass" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" @@ -53,13 +54,13 @@ var _ = SIGDescribe("RuntimeClass", func() { Testname: Pod with the non-existing RuntimeClass is rejected. Description: The Pod requesting the non-existing RuntimeClass must be rejected. */ - framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should reject a Pod requesting a non-existent RuntimeClass", f.WithNodeConformance(), func(ctx context.Context) { rcName := f.Namespace.Name + "-nonexistent" expectPodRejection(ctx, f, e2eruntimeclass.NewRuntimeClassPod(rcName)) }) // The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler not being installed. - ginkgo.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) { + f.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", nodefeature.RuntimeHandler, func(ctx context.Context) { handler := f.Namespace.Name + "-handler" rcName := createRuntimeClass(ctx, f, "unconfigured-handler", handler, nil) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) @@ -83,7 +84,7 @@ var _ = SIGDescribe("RuntimeClass", func() { // This test requires that the PreconfiguredRuntimeClassHandler has already been set up on nodes. // The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler installed and working. - ginkgo.It("should run a Pod requesting a RuntimeClass with a configured handler [NodeFeature:RuntimeHandler]", func(ctx context.Context) { + f.It("should run a Pod requesting a RuntimeClass with a configured handler", nodefeature.RuntimeHandler, func(ctx context.Context) { if err := e2eruntimeclass.NodeSupportsPreconfiguredRuntimeClassHandler(ctx, f); err != nil { e2eskipper.Skipf("Skipping test as node does not have E2E runtime class handler preconfigured in container runtime config: %v", err) } @@ -102,7 +103,7 @@ var _ = SIGDescribe("RuntimeClass", func() { depends on container runtime and preconfigured handler. 
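
Note what moves and what stays in the runtimeclass hunks above: "[NodeFeature:RuntimeHandler]" disappears from the It string entirely and becomes the nodefeature.RuntimeHandler argument, while tags that have no label API in this patch, such as "[LinuxOnly]" or "[MinimumKubeletVersion:1.23]" in other files, remain as literal text in the test name. Sketch of the resulting pattern (test text invented):

    // "[SomeCustomTag]" stands for any free-form tag that has no framework label
    // yet and therefore stays inside the string.
    f.It("should run with the preconfigured handler [SomeCustomTag]", nodefeature.RuntimeHandler,
        func(ctx context.Context) {
            // ...
        })
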
Runtime-specific functionality is not being tested here. */ - framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass without PodOverhead", f.WithNodeConformance(), func(ctx context.Context) { rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2eruntimeclass.PreconfiguredRuntimeClassHandler, nil) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) pod := e2epod.NewPodClient(f).Create(ctx, e2eruntimeclass.NewRuntimeClassPod(rcName)) @@ -127,7 +128,7 @@ var _ = SIGDescribe("RuntimeClass", func() { depends on container runtime and preconfigured handler. Runtime-specific functionality is not being tested here. */ - framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should schedule a Pod requesting a RuntimeClass and initialize its Overhead", f.WithNodeConformance(), func(ctx context.Context) { rcName := createRuntimeClass(ctx, f, "preconfigured-handler", e2eruntimeclass.PreconfiguredRuntimeClassHandler, &nodev1.Overhead{ PodFixed: v1.ResourceList{ v1.ResourceName(v1.ResourceCPU): resource.MustParse("10m"), @@ -154,7 +155,7 @@ var _ = SIGDescribe("RuntimeClass", func() { Testname: Pod with the deleted RuntimeClass is rejected. Description: Pod requesting the deleted RuntimeClass must be rejected. */ - framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should reject a Pod requesting a deleted RuntimeClass", f.WithNodeConformance(), func(ctx context.Context) { rcName := createRuntimeClass(ctx, f, "delete-me", "runc", nil) rcClient := f.ClientSet.NodeV1().RuntimeClasses() diff --git a/test/e2e/common/node/secrets.go b/test/e2e/common/node/secrets.go index de7bc523aec5e..885c2c4b9a949 100644 --- a/test/e2e/common/node/secrets.go +++ b/test/e2e/common/node/secrets.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets, pod environment field Description: Create a secret. Create a Pod with Container that declares a environment variable which references the secret created to extract a key value from the secret. Pod MUST have the environment variable that contains proper value for the key to the secret. */ - framework.ConformanceIt("should be consumable from pods in env vars [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in env vars", f.WithNodeConformance(), func(ctx context.Context) { name := "secret-test-" + string(uuid.NewUUID()) secret := secretForTest(f.Namespace.Name, name) @@ -93,7 +93,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets, pod environment from source Description: Create a secret. Create a Pod with Container that declares a environment variable using 'EnvFrom' which references the secret created to extract a key value from the secret. Pod MUST have the environment variable that contains proper value for the key to the secret. 
*/ - framework.ConformanceIt("should be consumable via the environment [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable via the environment", f.WithNodeConformance(), func(ctx context.Context) { name := "secret-test-" + string(uuid.NewUUID()) secret := secretForTest(f.Namespace.Name, name) ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) diff --git a/test/e2e/common/node/security_context.go b/test/e2e/common/node/security_context.go index 9909fb7ea6234..0a8fd653c55bd 100644 --- a/test/e2e/common/node/security_context.go +++ b/test/e2e/common/node/security_context.go @@ -26,10 +26,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/events" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" "k8s.io/utils/pointer" @@ -72,7 +74,7 @@ var _ = SIGDescribe("Security Context", func() { } } - ginkgo.It("must create the user namespace if set to false [LinuxOnly] [Feature:UserNamespacesSupport]", func(ctx context.Context) { + f.It("must create the user namespace if set to false [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) { // with hostUsers=false the pod must use a new user namespace podClient := e2epod.PodClientNS(f, f.Namespace.Name) @@ -110,7 +112,7 @@ var _ = SIGDescribe("Security Context", func() { } }) - ginkgo.It("must not create the user namespace if set to true [LinuxOnly] [Feature:UserNamespacesSupport]", func(ctx context.Context) { + f.It("must not create the user namespace if set to true [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) { // with hostUsers=true the pod must use the host user namespace pod := makePod(true) // When running in the host's user namespace, the /proc/self/uid_map file content looks like: @@ -121,7 +123,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - ginkgo.It("should mount all volumes with proper permissions with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport]", func(ctx context.Context) { + f.It("should mount all volumes with proper permissions with hostUsers=false [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) { // Create all volume types supported: configmap, secret, downwardAPI, projected. // Create configmap. @@ -245,7 +247,7 @@ var _ = SIGDescribe("Security Context", func() { }) }) - ginkgo.It("should set FSGroup to user inside the container with hostUsers=false [LinuxOnly] [Feature:UserNamespacesSupport]", func(ctx context.Context) { + f.It("should set FSGroup to user inside the container with hostUsers=false [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) { // Create configmap. name := "userns-volumes-test-" + string(uuid.NewUUID()) configMap := newConfigMap(f, name) @@ -344,7 +346,7 @@ var _ = SIGDescribe("Security Context", func() { Description: Container is created with runAsUser option by passing uid 65534 to run as unpriviledged user. Pod MUST be in Succeeded phase. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. 
*/ - framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { createAndWaitUserPod(ctx, 65534) }) @@ -355,7 +357,7 @@ var _ = SIGDescribe("Security Context", func() { This e2e can not be promoted to Conformance because a Conformant platform may not allow to run containers with 'uid 0' or running privileged operations. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID. */ - ginkgo.It("should run the container with uid 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + f.It("should run the container with uid 0 [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { createAndWaitUserPod(ctx, 0) }) }) @@ -473,7 +475,7 @@ var _ = SIGDescribe("Security Context", func() { At this moment we are not considering this test for Conformance due to use of SecurityContext. [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support creating containers with read-only access. */ - ginkgo.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + f.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { createAndWaitUserPod(ctx, true) }) @@ -483,7 +485,7 @@ var _ = SIGDescribe("Security Context", func() { Description: Container is configured to run with readOnlyRootFilesystem to false. Write operation MUST be allowed and Pod MUST be in Succeeded state. */ - framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false", f.WithNodeConformance(), func(ctx context.Context) { createAndWaitUserPod(ctx, false) }) }) @@ -525,7 +527,7 @@ var _ = SIGDescribe("Security Context", func() { Description: Create a container to run in unprivileged mode by setting pod's SecurityContext Privileged option as false. Pod MUST be in Succeeded phase. [LinuxOnly]: This test is marked as LinuxOnly since it runs a Linux-specific command. */ - framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { podName := createAndWaitUserPod(ctx, false) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { @@ -538,7 +540,7 @@ var _ = SIGDescribe("Security Context", func() { } }) - ginkgo.It("should run the container as privileged when true [LinuxOnly] [NodeFeature:HostAccess]", func(ctx context.Context) { + f.It("should run the container as privileged when true [LinuxOnly]", nodefeature.HostAccess, func(ctx context.Context) { podName := createAndWaitUserPod(ctx, true) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { @@ -591,7 +593,7 @@ var _ = SIGDescribe("Security Context", func() { This e2e Can not be promoted to Conformance as it is Container Runtime dependent and not all conformant platforms will require this behavior. 
[LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID, or privilege escalation. */ - ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + f.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { podName := "alpine-nnp-nil-" + string(uuid.NewUUID()) if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil { framework.Failf("Match output for pod %q failed: %v", podName, err) @@ -606,7 +608,7 @@ var _ = SIGDescribe("Security Context", func() { When the container is run, container's output MUST match with expected output verifying container ran with given uid i.e. uid=1000. [LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID, or privilege escalation. */ - framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { podName := "alpine-nnp-false-" + string(uuid.NewUUID()) apeFalse := false if err := createAndMatchOutput(ctx, podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil { @@ -623,7 +625,7 @@ var _ = SIGDescribe("Security Context", func() { This e2e Can not be promoted to Conformance as it is Container Runtime dependent and runtime may not allow to run. [LinuxOnly]: This test is marked LinuxOnly since Windows does not support running as UID / GID. */ - ginkgo.It("should allow privilege escalation when true [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + f.It("should allow privilege escalation when true [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { podName := "alpine-nnp-true-" + string(uuid.NewUUID()) apeTrue := true if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil { diff --git a/test/e2e/common/node/sysctl.go b/test/e2e/common/node/sysctl.go index 1de5be7272255..eb721effe3587 100644 --- a/test/e2e/common/node/sysctl.go +++ b/test/e2e/common/node/sysctl.go @@ -22,6 +22,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/environment" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -32,7 +33,7 @@ import ( "github.com/onsi/gomega" ) -var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { +var _ = SIGDescribe("Sysctls [LinuxOnly]", framework.WithNodeConformance(), func() { ginkgo.BeforeEach(func() { // sysctl is not supported on Windows. 
@@ -75,7 +76,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls [Environment:NotInUserNS]: The test fails in UserNS (as expected): `open /proc/sys/kernel/shm_rmid_forced: permission denied` */ - framework.ConformanceIt("should support sysctls [MinimumKubeletVersion:1.21] [Environment:NotInUserNS]", func(ctx context.Context) { + framework.ConformanceIt("should support sysctls [MinimumKubeletVersion:1.21]", environment.NotInUserNS, func(ctx context.Context) { pod := testPod() pod.Spec.SecurityContext = &v1.PodSecurityContext{ Sysctls: []v1.Sysctl{ @@ -185,7 +186,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() { [LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls [Environment:NotInUserNS]: The test fails in UserNS (as expected): `open /proc/sys/kernel/shm_rmid_forced: permission denied` */ - ginkgo.It("should support sysctls with slashes as separator [MinimumKubeletVersion:1.23] [Environment:NotInUserNS]", func(ctx context.Context) { + f.It("should support sysctls with slashes as separator [MinimumKubeletVersion:1.23]", environment.NotInUserNS, func(ctx context.Context) { pod := testPod() pod.Spec.SecurityContext = &v1.PodSecurityContext{ Sysctls: []v1.Sysctl{ diff --git a/test/e2e/common/storage/configmap_volume.go b/test/e2e/common/storage/configmap_volume.go index 9799f3b16f82a..27b23a81118b0 100644 --- a/test/e2e/common/storage/configmap_volume.go +++ b/test/e2e/common/storage/configmap_volume.go @@ -31,6 +31,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" ) @@ -44,7 +45,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, without mapping Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644. */ - framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume", f.WithNodeConformance(), func(ctx context.Context) { doConfigMapE2EWithoutMappings(ctx, f, false, 0, nil) }) @@ -54,12 +55,12 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of '0x400' This test is marked LinuxOnly since Windows does not support setting specific file permissions. 
*/ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { defaultMode := int32(0400) doConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode) }) - ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ @@ -71,11 +72,11 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, without mapping, non-root user Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644. */ - framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume as non-root", f.WithNodeConformance(), func(ctx context.Context) { doConfigMapE2EWithoutMappings(ctx, f, true, 0, nil) }) - ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil) @@ -86,7 +87,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, with mapping Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings", f.WithNodeConformance(), func(ctx context.Context) { doConfigMapE2EWithMappings(ctx, f, false, 0, nil) }) @@ -96,7 +97,7 @@ var _ = SIGDescribe("ConfigMap", func() { Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. 
The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of '0x400' This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { mode := int32(0400) doConfigMapE2EWithMappings(ctx, f, false, 0, &mode) }) @@ -106,11 +107,11 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, with mapping, non-root user Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root", f.WithNodeConformance(), func(ctx context.Context) { doConfigMapE2EWithMappings(ctx, f, true, 0, nil) }) - ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doConfigMapE2EWithMappings(ctx, f, true, 1001, nil) @@ -121,7 +122,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, update Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. */ - framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("updates should be reflected in volume", f.WithNodeConformance(), func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) @@ -172,7 +173,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, text data, binary data Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod. 
*/ - framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("binary data should be reflected in volume", f.WithNodeConformance(), func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) @@ -237,7 +238,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, create, update and delete Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in a error reading that item(file). */ - framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("optional updates should be reflected in volume", f.WithNodeConformance(), func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -420,7 +421,7 @@ var _ = SIGDescribe("ConfigMap", func() { Testname: ConfigMap Volume, multiple volume maps Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to multiple paths in the Pod. The content MUST be accessible from all the mapped volume mounts. */ - framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable in multiple volumes in the same pod", f.WithNodeConformance(), func(ctx context.Context) { var ( name = "configmap-test-volume-" + string(uuid.NewUUID()) volumeName = "configmap-volume" @@ -554,7 +555,7 @@ var _ = SIGDescribe("ConfigMap", func() { // The pod is in pending during volume creation until the configMap objects are available // or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional. // Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) { + f.It("Should fail non-optional pod creation due to configMap object does not exist", f.WithSlow(), func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" pod := createNonOptionalConfigMapPod(ctx, f, volumeMountPath) getPod := e2epod.Get(f.ClientSet, pod) @@ -564,7 +565,7 @@ var _ = SIGDescribe("ConfigMap", func() { // ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap, // the volume setup will error unless it is marked optional, during the pod creation. 
// Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) { + f.It("Should fail non-optional pod creation due to the key in the configMap object does not exist", f.WithSlow(), func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" pod := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath) getPod := e2epod.Get(f.ClientSet, pod) diff --git a/test/e2e/common/storage/downwardapi.go b/test/e2e/common/storage/downwardapi.go index 8919615e05cf4..f3c8a35582d72 100644 --- a/test/e2e/common/storage/downwardapi.go +++ b/test/e2e/common/storage/downwardapi.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" imageutils "k8s.io/kubernetes/test/utils/image" @@ -32,7 +33,7 @@ import ( "github.com/onsi/ginkgo/v2" ) -var _ = SIGDescribe("Downward API [Serial] [Disruptive] [Feature:EphemeralStorage]", func() { +var _ = SIGDescribe("Downward API", framework.WithSerial(), framework.WithDisruptive(), feature.EphemeralStorage, func() { f := framework.NewDefaultFramework("downward-api") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/common/storage/downwardapi_volume.go b/test/e2e/common/storage/downwardapi_volume.go index 07c8a2c3b174c..84716ed990b5f 100644 --- a/test/e2e/common/storage/downwardapi_volume.go +++ b/test/e2e/common/storage/downwardapi_volume.go @@ -29,6 +29,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -51,7 +52,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, pod name Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide podname only [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide podname only", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") @@ -66,7 +67,7 @@ var _ = SIGDescribe("Downward API volume", func() { Description: A Pod is configured with DownwardAPIVolumeSource with the volumesource mode set to -r-------- and DownwardAPIVolumeFiles contains a item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume. This test is marked LinuxOnly since Windows does not support setting specific file permissions. 
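
The Downward API rewrite above also illustrates a scoping detail that recurs throughout the patch: at the top-level var _ = SIGDescribe(...) call there is no framework instance yet (f is only created inside the closure), so the package-level helpers framework.WithSerial(), framework.WithDisruptive() and so on are used there, while tests inside the body use the equivalent methods on f, such as f.WithSlow() or f.WithNodeConformance(). Several labels can be stacked on a single node. Sketch with invented names:

    var _ = SIGDescribe("Example volumes", framework.WithSerial(), framework.WithDisruptive(), func() {
        // f does not exist until this closure runs, hence framework.With*() above.
        f := framework.NewDefaultFramework("example-volumes")

        f.It("recovers after disruption", f.WithSlow(), func(ctx context.Context) {
            // ...
        })
    })
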
*/ - framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should set DefaultMode on files [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) defaultMode := int32(0400) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) @@ -82,7 +83,7 @@ var _ = SIGDescribe("Downward API volume", func() { Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access Pod name from the specified path on the mounted volume. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should set mode on item file [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) mode := int32(0400) pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) @@ -92,7 +93,7 @@ var _ = SIGDescribe("Downward API volume", func() { }) }) - ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -107,7 +108,7 @@ var _ = SIGDescribe("Downward API volume", func() { }) }) - ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -128,7 +129,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, update label Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod labels. The container runtime MUST be able to access Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume. */ - framework.ConformanceIt("should update labels on modification [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should update labels on modification", f.WithNodeConformance(), func(ctx context.Context) { labels := map[string]string{} labels["key1"] = "value1" labels["key2"] = "value2" @@ -160,7 +161,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, update annotations Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod annotations. The container runtime MUST be able to access Pod annotations from the specified path on the mounted volume. 
Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume. */ - framework.ConformanceIt("should update annotations on modification [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should update annotations on modification", f.WithNodeConformance(), func(ctx context.Context) { annotations := map[string]string{} annotations["builder"] = "bar" podName := "annotationupdate" + string(uuid.NewUUID()) @@ -191,7 +192,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, CPU limits Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's cpu limit", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -205,7 +206,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, memory limits Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. The container runtime MUST be able to access memory limits from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's memory limit", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") @@ -219,7 +220,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, CPU request Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU request. The container runtime MUST be able to access CPU request from the specified path on the mounted volume. */ - framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's cpu request", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") @@ -233,7 +234,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, memory request Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory request. The container runtime MUST be able to access memory request from the specified path on the mounted volume. 
*/ - framework.ConformanceIt("should provide container's memory request [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's memory request", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") @@ -247,7 +248,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, CPU limit, default node allocatable Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. CPU limits is not specified for the container. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume and the value MUST be default node allocatable. */ - framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -259,7 +260,7 @@ var _ = SIGDescribe("Downward API volume", func() { Testname: DownwardAPI volume, memory limit, default node allocatable Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. memory limits is not specified for the container. The container runtime MUST be able to access memory limits from the specified path on the mounted volume and the value MUST be default node allocatable. */ - framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") diff --git a/test/e2e/common/storage/empty_dir.go b/test/e2e/common/storage/empty_dir.go index 814c12bea0d68..310e03ac03d87 100644 --- a/test/e2e/common/storage/empty_dir.go +++ b/test/e2e/common/storage/empty_dir.go @@ -23,6 +23,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,6 +32,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" ) @@ -47,7 +49,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { f := framework.NewDefaultFramework("emptydir") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline - ginkgo.Context("when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup]", func() { + f.Context("when FSGroup is specified [LinuxOnly]", nodefeature.FSGroup, func() { ginkgo.BeforeEach(func() { // Windows does not support the FSGroup SecurityContext option. 
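Editor's note: "[NodeFeature:FSGroup]" tags follow the same scheme but use the nodefeature package imported in the hunk above, and registration moves from ginkgo.It / ginkgo.Context to f.It / f.Context so the label can be attached. A sketch of the two forms seen in this change (taken from the Downward API and EmptyDir hunks above, file scaffolding omitted):

    // Before:
    //   ginkgo.Context("when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup]", func() { ... })
    //   ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { ... })

    // After: the node feature is a typed label from k8s.io/kubernetes/test/e2e/nodefeature.
    f.Context("when FSGroup is specified [LinuxOnly]", nodefeature.FSGroup, func() {
        // ...
    })
    f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
        // Windows does not support RunAsUser / FSGroup SecurityContext options.
        e2eskipper.SkipIfNodeOSDistroIs("windows")
        // ... unchanged test body ...
    })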
@@ -85,7 +87,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or the medium = 'Memory'. */ - framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTestVolumeMode(ctx, f, 0, v1.StorageMediumMemory) }) @@ -95,7 +97,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0644(ctx, f, 0, v1.StorageMediumMemory) }) @@ -105,7 +107,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0666(ctx, f, 0, v1.StorageMediumMemory) }) @@ -115,7 +117,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0777(ctx, f, 0, v1.StorageMediumMemory) }) @@ -125,7 +127,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. 
*/ - framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0644(ctx, f, nonRootUID, v1.StorageMediumMemory) }) @@ -135,7 +137,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0666(ctx, f, nonRootUID, v1.StorageMediumMemory) }) @@ -145,7 +147,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'. */ - framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0777(ctx, f, nonRootUID, v1.StorageMediumMemory) }) @@ -155,7 +157,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTestVolumeMode(ctx, f, 0, v1.StorageMediumDefault) }) @@ -165,7 +167,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (root,0644,default) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0644(ctx, f, 0, v1.StorageMediumDefault) }) @@ -175,7 +177,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable. 
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (root,0666,default) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0666(ctx, f, 0, v1.StorageMediumDefault) }) @@ -185,7 +187,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (root,0777,default) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0777(ctx, f, 0, v1.StorageMediumDefault) }) @@ -195,7 +197,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0644(ctx, f, nonRootUID, v1.StorageMediumDefault) }) @@ -205,7 +207,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0666(ctx, f, nonRootUID, v1.StorageMediumDefault) }) @@ -215,7 +217,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. 
*/ - framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { doTest0777(ctx, f, nonRootUID, v1.StorageMediumDefault) }) diff --git a/test/e2e/common/storage/host_path.go b/test/e2e/common/storage/host_path.go index 672ef481e3b2f..5f59e50da5423 100644 --- a/test/e2e/common/storage/host_path.go +++ b/test/e2e/common/storage/host_path.go @@ -49,7 +49,7 @@ var _ = SIGDescribe("HostPath", func() { Create a Pod with host volume mounted. The volume mounted MUST be a directory with permissions mode -rwxrwxrwx and that is has the sticky bit (mode flag t) set. This test is marked LinuxOnly since Windows does not support setting the sticky bit (mode flag t). */ - ginkgo.It("should give a volume the correct mode [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + f.It("should give a volume the correct mode [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { source := &v1.HostPathVolumeSource{ Path: "/tmp", } @@ -66,7 +66,7 @@ var _ = SIGDescribe("HostPath", func() { }) // This test requires mounting a folder into a container with write privileges. - ginkgo.It("should support r/w [NodeConformance]", func(ctx context.Context) { + f.It("should support r/w", f.WithNodeConformance(), func(ctx context.Context) { filePath := path.Join(volumePath, "test-file") retryDuration := 180 source := &v1.HostPathVolumeSource{ @@ -94,7 +94,7 @@ var _ = SIGDescribe("HostPath", func() { }) }) - ginkgo.It("should support subPath [NodeConformance]", func(ctx context.Context) { + f.It("should support subPath", f.WithNodeConformance(), func(ctx context.Context) { subPath := "sub-path" fileName := "test-file" retryDuration := 180 diff --git a/test/e2e/common/storage/projected_combined.go b/test/e2e/common/storage/projected_combined.go index fd977257f2182..c477cc5a10fbd 100644 --- a/test/e2e/common/storage/projected_combined.go +++ b/test/e2e/common/storage/projected_combined.go @@ -41,7 +41,7 @@ var _ = SIGDescribe("Projected combined", func() { Testname: Projected Volume, multiple projections Description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory limits as well as cpu and memory requests from the mounted DownwardAPIVolumeFiles. 
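Editor's note: tests that previously registered through ginkgo.It directly (for example the HostPath tests above) switch to f.It, so that the framework-provided labels can be recorded; the test text and any remaining custom tags such as [LinuxOnly] stay in the string. A sketch mirroring one of the hunks above:

    // Before:
    //   ginkgo.It("should support r/w [NodeConformance]", func(ctx context.Context) { ... })

    // After: f.It takes the text plus any number of framework labels.
    f.It("should support r/w", f.WithNodeConformance(), func(ctx context.Context) {
        // ... unchanged test body ...
    })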
*/ - framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should project all components that make up the projection API [Projection]", f.WithNodeConformance(), func(ctx context.Context) { var err error podName := "projected-volume-" + string(uuid.NewUUID()) secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID()) diff --git a/test/e2e/common/storage/projected_configmap.go b/test/e2e/common/storage/projected_configmap.go index fe48c8f716d77..f80e06eb452b7 100644 --- a/test/e2e/common/storage/projected_configmap.go +++ b/test/e2e/common/storage/projected_configmap.go @@ -28,6 +28,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -44,7 +45,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, volume mode default Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. */ - framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume", f.WithNodeConformance(), func(ctx context.Context) { doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, nil) }) @@ -54,12 +55,12 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with permission mode set to 0400. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -r--------. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { defaultMode := int32(0400) doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode) }) - ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ @@ -71,11 +72,11 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, non-root user Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--. 
*/ - framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume as non-root", f.WithNodeConformance(), func(ctx context.Context) { doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 0, nil) }) - ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil) @@ -86,7 +87,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, mapped Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings", f.WithNodeConformance(), func(ctx context.Context) { doProjectedConfigMapE2EWithMappings(ctx, f, false, 0, nil) }) @@ -96,7 +97,7 @@ var _ = SIGDescribe("Projected configMap", func() { Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with permission mode set to 0400. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { mode := int32(0400) doProjectedConfigMapE2EWithMappings(ctx, f, false, 0, &mode) }) @@ -106,11 +107,11 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, mapped, non-root user Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--. 
*/ - framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root", f.WithNodeConformance(), func(ctx context.Context) { doProjectedConfigMapE2EWithMappings(ctx, f, true, 0, nil) }) - ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doProjectedConfigMapE2EWithMappings(ctx, f, true, 1001, nil) @@ -121,7 +122,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, update Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap and performs a create and update to new value. Pod MUST be able to create the configMap with value-1. Pod MUST be able to update the value in the confgiMap to value-2. */ - framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("updates should be reflected in volume", f.WithNodeConformance(), func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) @@ -171,7 +172,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, create, update and delete Description: Create a Pod with three containers with ConfigMaps namely a create, update and delete container. Create Container when started MUST not have configMap, update and delete containers MUST be created with a ConfigMap value as 'value-1'. Create a configMap in the create container, the Pod MUST be able to read the configMap from the create container. Update the configMap in the update container, Pod MUST be able to read the updated configMap value. Delete the configMap in the delete container. Pod MUST fail to read the configMap from the delete container. */ - framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("optional updates should be reflected in volume", f.WithNodeConformance(), func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -372,7 +373,7 @@ var _ = SIGDescribe("Projected configMap", func() { Testname: Projected Volume, ConfigMap, multiple volume paths Description: A Pod is created with a projected volume source 'ConfigMap' to store a configMap. The configMap is mapped to two different volume mounts. Pod MUST be able to read the content of the configMap successfully from the two volume mounts. 
*/ - framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable in multiple volumes in the same pod", f.WithNodeConformance(), func(ctx context.Context) { var ( name = "projected-configmap-test-volume-" + string(uuid.NewUUID()) volumeName = "projected-configmap-volume" @@ -460,7 +461,7 @@ var _ = SIGDescribe("Projected configMap", func() { //The pod is in pending during volume creation until the configMap objects are available //or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timeout exception unless it is marked optional. //Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) { + f.It("Should fail non-optional pod creation due to configMap object does not exist", f.WithSlow(), func(ctx context.Context) { volumeMountPath := "/etc/projected-configmap-volumes" pod := createNonOptionalConfigMapPod(ctx, f, volumeMountPath) getPod := e2epod.Get(f.ClientSet, pod) @@ -470,7 +471,7 @@ var _ = SIGDescribe("Projected configMap", func() { //ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap, // the volume setup will error unless it is marked optional, during the pod creation. //Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func(ctx context.Context) { + f.It("Should fail non-optional pod creation due to the key in the configMap object does not exist", f.WithSlow(), func(ctx context.Context) { volumeMountPath := "/etc/configmap-volumes" pod := createNonOptionalConfigMapPodWithConfig(ctx, f, volumeMountPath) getPod := e2epod.Get(f.ClientSet, pod) diff --git a/test/e2e/common/storage/projected_downwardapi.go b/test/e2e/common/storage/projected_downwardapi.go index a08bcf76b61ec..afb04ba17c406 100644 --- a/test/e2e/common/storage/projected_downwardapi.go +++ b/test/e2e/common/storage/projected_downwardapi.go @@ -28,6 +28,7 @@ import ( e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -51,7 +52,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, pod name Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide podname only [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide podname only", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname") @@ -66,7 +67,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------. 
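Editor's note: the "[Slow]" tag gets the same treatment via f.WithSlow(), as in the two non-optional ConfigMap tests above; only the tagging changes, the "~5 mins" expectation noted in the comments does not. Sketch:

    // Before:
    //   ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func(ctx context.Context) { ... })

    // After:
    f.It("Should fail non-optional pod creation due to configMap object does not exist", f.WithSlow(), func(ctx context.Context) {
        // ... unchanged test body ...
    })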
This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should set DefaultMode on files [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) defaultMode := int32(0400) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode) @@ -82,7 +83,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should set mode on item file [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) mode := int32(0400) pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil) @@ -92,7 +93,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { }) }) - ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -107,7 +108,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { }) }) - ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) { + f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -128,7 +129,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, update labels Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and label items. Pod MUST be able to read the labels from the mounted DownwardAPIVolumeFiles. Labels are then updated. Pod MUST be able to read the updated values for the Labels. 
*/ - framework.ConformanceIt("should update labels on modification [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should update labels on modification", f.WithNodeConformance(), func(ctx context.Context) { labels := map[string]string{} labels["key1"] = "value1" labels["key2"] = "value2" @@ -160,7 +161,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, update annotation Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and annotation items. Pod MUST be able to read the annotations from the mounted DownwardAPIVolumeFiles. Annotations are then updated. Pod MUST be able to read the updated values for the Annotations. */ - framework.ConformanceIt("should update annotations on modification [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should update annotations on modification", f.WithNodeConformance(), func(ctx context.Context) { annotations := map[string]string{} annotations["builder"] = "bar" podName := "annotationupdate" + string(uuid.NewUUID()) @@ -191,7 +192,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, CPU limits Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu limits from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's cpu limit", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -205,7 +206,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, memory limits Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory limits from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's memory limit", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit") @@ -219,7 +220,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, CPU request Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu request from the mounted DownwardAPIVolumeFiles. 
*/ - framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's cpu request", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request") @@ -233,7 +234,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, memory request Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory request from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide container's memory request [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide container's memory request", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request") @@ -247,7 +248,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, CPU limit, node allocatable Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default cpu limits from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit") @@ -259,7 +260,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { Testname: Projected Volume, DownwardAPI, memory limit, node allocatable Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default memory limits from the mounted DownwardAPIVolumeFiles. */ - framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set", f.WithNodeConformance(), func(ctx context.Context) { podName := "downwardapi-volume-" + string(uuid.NewUUID()) pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit") diff --git a/test/e2e/common/storage/projected_secret.go b/test/e2e/common/storage/projected_secret.go index 7c81ba42b4aff..cf1f81ea61e5e 100644 --- a/test/e2e/common/storage/projected_secret.go +++ b/test/e2e/common/storage/projected_secret.go @@ -43,7 +43,7 @@ var _ = SIGDescribe("Projected secret", func() { Testname: Projected Volume, Secrets, volume mode default Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. 
Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default. */ - framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume", f.WithNodeConformance(), func(ctx context.Context) { doProjectedSecretE2EWithoutMapping(ctx, f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -53,7 +53,7 @@ var _ = SIGDescribe("Projected secret", func() { Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with permission mode set to 0x400 on the Pod. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { defaultMode := int32(0400) doProjectedSecretE2EWithoutMapping(ctx, f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -64,7 +64,7 @@ var _ = SIGDescribe("Projected secret", func() { Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key. The volume has permission mode set to 0440, fsgroup set to 1001 and user set to non-root uid of 1000. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--r-----. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. */ - framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ fsGroup := int64(1001) doProjectedSecretE2EWithoutMapping(ctx, f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) @@ -75,7 +75,7 @@ var _ = SIGDescribe("Projected secret", func() { Testname: Projected Volume, Secrets, mapped Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------on the mapped volume. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings", f.WithNodeConformance(), func(ctx context.Context) { doProjectedSecretE2EWithMapping(ctx, f, nil) }) @@ -85,12 +85,12 @@ var _ = SIGDescribe("Projected secret", func() { Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with permission mode set to 0400. The secret is also mapped to a specific name. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume. 
This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { mode := int32(0400) doProjectedSecretE2EWithMapping(ctx, f, &mode) }) - ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) { + f.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", f.WithNodeConformance(), func(ctx context.Context) { var ( namespace2 *v1.Namespace err error @@ -116,7 +116,7 @@ var _ = SIGDescribe("Projected secret", func() { Testname: Projected Volume, Secrets, mapped, multiple paths Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key. The secret is mapped to two different volume mounts. Pod MUST be able to read the content of the key successfully from the two volume mounts and the mode MUST be -r-------- on the mapped volumes. */ - framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable in multiple volumes in a pod", f.WithNodeConformance(), func(ctx context.Context) { // This test ensures that the same secret can be mounted in multiple // volumes in the same pod. This test case exists to prevent // regressions that break this use-case. @@ -212,7 +212,7 @@ var _ = SIGDescribe("Projected secret", func() { Testname: Projected Volume, Secrets, create, update delete Description: Create a Pod with three containers with secrets namely a create, update and delete container. Create Container when started MUST no have a secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container. */ - framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("optional updates should be reflected in volume", f.WithNodeConformance(), func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -411,7 +411,7 @@ var _ = SIGDescribe("Projected secret", func() { //The secret is in pending during volume creation until the secret objects are available //or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional. 
//Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) { + f.It("Should fail non-optional pod creation due to secret object does not exist", f.WithSlow(), func(ctx context.Context) { volumeMountPath := "/etc/projected-secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) pod := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName) @@ -422,7 +422,7 @@ var _ = SIGDescribe("Projected secret", func() { //Secret object defined for the pod, If a key is specified which is not present in the secret, // the volume setup will error unless it is marked optional, during the pod creation. //Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) { + f.It("Should fail non-optional pod creation due to the key in the secret object does not exist", f.WithSlow(), func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) pod := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName) diff --git a/test/e2e/common/storage/secrets_volume.go b/test/e2e/common/storage/secrets_volume.go index 94d55eaa2c484..a41595aa9aeea 100644 --- a/test/e2e/common/storage/secrets_volume.go +++ b/test/e2e/common/storage/secrets_volume.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, default Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default. */ - framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume", f.WithNodeConformance(), func(ctx context.Context) { doSecretE2EWithoutMapping(ctx, f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -54,7 +54,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0x400. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r-------- by default. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { defaultMode := int32(0400) doSecretE2EWithoutMapping(ctx, f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil) }) @@ -65,7 +65,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0x440 as a non-root user with uid 1000 and fsGroup id 1001. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--r-----by default. This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID. 
*/ - framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ fsGroup := int64(1001) doSecretE2EWithoutMapping(ctx, f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID) @@ -76,7 +76,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, mapping Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings", f.WithNodeConformance(), func(ctx context.Context) { doSecretE2EWithMapping(ctx, f, nil) }) @@ -86,7 +86,7 @@ var _ = SIGDescribe("Secrets", func() { Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path and file mode set to 0x400. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -r--r--r--. This test is marked LinuxOnly since Windows does not support setting specific file permissions. */ - framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) { mode := int32(0400) doSecretE2EWithMapping(ctx, f, &mode) }) @@ -96,7 +96,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, volume mode default, secret with same name in different namespace Description: Create a secret with same name in two namespaces. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secrets from the mounted volume from the container runtime and only secrets which are associated with namespace where pod is created. The file mode of the secret MUST be -rw-r--r-- by default. */ - framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", f.WithNodeConformance(), func(ctx context.Context) { var ( namespace2 *v1.Namespace err error @@ -122,7 +122,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, mapping multiple volume paths Description: Create a secret. Create a Pod with two secret volume sources configured into the container in to two different custom paths. Pod MUST be able to read the secret from the both the mounted volumes from the two specified custom paths. 
*/ - framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("should be consumable in multiple volumes in a pod", f.WithNodeConformance(), func(ctx context.Context) { // This test ensures that the same secret can be mounted in multiple // volumes in the same pod. This test case exists to prevent // regressions that break this use-case. @@ -202,7 +202,7 @@ var _ = SIGDescribe("Secrets", func() { Testname: Secrets Volume, create, update and delete Description: Create a Pod with three containers with secrets volume sources namely a create, update and delete container. Create Container when started MUST not have secret, update and delete containers MUST be created with a secret value. Create a secret in the create container, the Pod MUST be able to read the secret from the create container. Update the secret in the update container, Pod MUST be able to read the updated secret value. Delete the secret in the delete container. Pod MUST fail to read the secret from the delete container. */ - framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func(ctx context.Context) { + framework.ConformanceIt("optional updates should be reflected in volume", f.WithNodeConformance(), func(ctx context.Context) { podLogTimeout := e2epod.GetPodSecretUpdateTimeout(ctx, f.ClientSet) containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds())) trueVal := true @@ -436,7 +436,7 @@ var _ = SIGDescribe("Secrets", func() { // The secret is in pending during volume creation until the secret objects are available // or until mount the secret volume times out. There is no secret object defined for the pod, so it should return timeout exception unless it is marked optional. // Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func(ctx context.Context) { + f.It("Should fail non-optional pod creation due to secret object does not exist", f.WithSlow(), func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) pod := createNonOptionalSecretPod(ctx, f, volumeMountPath, podName) @@ -447,7 +447,7 @@ var _ = SIGDescribe("Secrets", func() { // Secret object defined for the pod, If a key is specified which is not present in the secret, // the volume setup will error unless it is marked optional, during the pod creation. 
// Slow (~5 mins) - ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func(ctx context.Context) { + f.It("Should fail non-optional pod creation due to the key in the secret object does not exist", f.WithSlow(), func(ctx context.Context) { volumeMountPath := "/etc/secret-volumes" podName := "pod-secrets-" + string(uuid.NewUUID()) pod := createNonOptionalSecretPodWithSecret(ctx, f, volumeMountPath, podName) diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go index e62801f41f6c2..f4e7fdfb589dc 100644 --- a/test/e2e/dra/dra.go +++ b/test/e2e/dra/dra.go @@ -35,6 +35,7 @@ import ( "k8s.io/dynamic-resource-allocation/controller" "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e/dra/test-driver/app" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -53,7 +54,7 @@ func networkResources() app.Resources { } } -var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", func() { +var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, func() { f := framework.NewDefaultFramework("dra") // The driver containers have to run with sufficient privileges to @@ -523,7 +524,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu // https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/2268-non-graceful-shutdown // NOTE: this test depends on kind. It will only work with kind cluster as it shuts down one of the // nodes by running `docker stop `, which is very kind-specific. - ginkgo.It("[Serial] [Disruptive] [Slow] must deallocate on non graceful node shutdown", func(ctx context.Context) { + f.It(f.WithSerial(), f.WithDisruptive(), f.WithSlow(), "must deallocate on non graceful node shutdown", func(ctx context.Context) { ginkgo.By("create test pod") parameters := b.parameters() label := "app.kubernetes.io/instance" diff --git a/test/e2e/feature/feature.go b/test/e2e/feature/feature.go index 461f2d0214f52..f4b9d87ce4bb0 100644 --- a/test/e2e/feature/feature.go +++ b/test/e2e/feature/feature.go @@ -103,6 +103,7 @@ var ( SeccompDefault = framework.WithFeature(framework.ValidFeatures.Add("SeccompDefault")) SELinux = framework.WithFeature(framework.ValidFeatures.Add("SELinux")) SELinuxMountReadWriteOncePod = framework.WithFeature(framework.ValidFeatures.Add("SELinuxMountReadWriteOncePod")) + ServiceCIDRs = framework.WithFeature(framework.ValidFeatures.Add("ServiceCIDRs")) SidecarContainers = framework.WithFeature(framework.ValidFeatures.Add("SidecarContainers")) StackdriverAcceleratorMonitoring = framework.WithFeature(framework.ValidFeatures.Add("StackdriverAcceleratorMonitoring")) StackdriverCustomMetrics = framework.WithFeature(framework.ValidFeatures.Add("StackdriverCustomMetrics")) diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go index c95d1c40e3031..926a5c6ecafe1 100644 --- a/test/e2e/instrumentation/logging/generic_soak.go +++ b/test/e2e/instrumentation/logging/generic_soak.go @@ -43,7 +43,7 @@ var loggingSoak struct { } var _ = e2econfig.AddOptions(&loggingSoak, "instrumentation.logging.soak") -var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disruptive]", func() { +var _ = instrumentation.SIGDescribe("Logging soak [Performance]", framework.WithSlow(), framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("logging-soak") 
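Editor's note: the dra.go, feature.go and generic_soak.go hunks above show the remaining variants of the scheme. "[Feature:...]" tags become typed labels declared once in test/e2e/feature (the new ServiceCIDRs entry follows the existing pattern), the SIG prefix is supplied through framework.SIGDescribe instead of being spelled out in the Describe string, and where no framework instance exists yet the package-level framework.With* helpers are used. A condensed sketch mirroring those hunks, with test bodies elided:

    // test/e2e/feature/feature.go: each feature name is registered once as a valid label.
    var (
        ServiceCIDRs = framework.WithFeature(framework.ValidFeatures.Add("ServiceCIDRs"))
    )

    // test/e2e/dra/dra.go: SIG and feature become labels; per-test labels such as
    // Serial/Disruptive/Slow may precede the test text.
    var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, func() {
        f := framework.NewDefaultFramework("dra")

        f.It(f.WithSerial(), f.WithDisruptive(), f.WithSlow(), "must deallocate on non graceful node shutdown", func(ctx context.Context) {
            // ... unchanged test body ...
        })
    })

    // test/e2e/instrumentation/logging/generic_soak.go: at Describe level, before a
    // framework instance exists, the package-level helpers are used instead.
    var _ = instrumentation.SIGDescribe("Logging soak [Performance]", framework.WithSlow(), framework.WithDisruptive(), func() { /* ... */ })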
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/instrumentation/monitoring/accelerator.go b/test/e2e/instrumentation/monitoring/accelerator.go index 037d76d824c99..201c07634e2d9 100644 --- a/test/e2e/instrumentation/monitoring/accelerator.go +++ b/test/e2e/instrumentation/monitoring/accelerator.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -56,7 +57,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f := framework.NewDefaultFramework("stackdriver-monitoring") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - ginkgo.It("should have accelerator metrics [Feature:StackdriverAcceleratorMonitoring]", func(ctx context.Context) { + f.It("should have accelerator metrics", feature.StackdriverAcceleratorMonitoring, func(ctx context.Context) { testStackdriverAcceleratorMonitoring(ctx, f) }) diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index b9d3e826ba67e..4f460e24e27c5 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -30,6 +30,7 @@ import ( cacheddiscovery "k8s.io/client-go/discovery/cached/memory" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/restmapper" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" @@ -56,7 +57,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f := framework.NewDefaultFramework("stackdriver-monitoring") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - ginkgo.It("should run Custom Metrics - Stackdriver Adapter for old resource model [Feature:StackdriverCustomMetrics]", func(ctx context.Context) { + f.It("should run Custom Metrics - Stackdriver Adapter for old resource model", feature.StackdriverCustomMetrics, func(ctx context.Context) { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { @@ -71,7 +72,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { testCustomMetrics(ctx, f, kubeClient, customMetricsClient, discoveryClient, AdapterForOldResourceModel) }) - ginkgo.It("should run Custom Metrics - Stackdriver Adapter for new resource model [Feature:StackdriverCustomMetrics]", func(ctx context.Context) { + f.It("should run Custom Metrics - Stackdriver Adapter for new resource model", feature.StackdriverCustomMetrics, func(ctx context.Context) { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { @@ -86,7 +87,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { testCustomMetrics(ctx, f, kubeClient, customMetricsClient, discoveryClient, AdapterForNewResourceModel) }) - ginkgo.It("should run Custom Metrics - Stackdriver Adapter for external metrics [Feature:StackdriverExternalMetrics]", func(ctx context.Context) { + f.It("should run Custom Metrics - Stackdriver Adapter for external metrics", feature.StackdriverExternalMetrics, func(ctx context.Context) { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { 
diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go index 7e8f4a9afbcc8..0b4ec3b5d4be4 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver.go +++ b/test/e2e/instrumentation/monitoring/stackdriver.go @@ -24,6 +24,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -68,7 +69,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f := framework.NewDefaultFramework("stackdriver-monitoring") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - ginkgo.It("should have cluster metrics [Feature:StackdriverMonitoring]", func(ctx context.Context) { + f.It("should have cluster metrics", feature.StackdriverMonitoring, func(ctx context.Context) { testStackdriverMonitoring(ctx, f, 1, 100, 200) }) diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go index 722b9e7faf79a..6108d9031a637 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go +++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go @@ -27,6 +27,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -54,7 +55,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var kubeClient clientset.Interface - ginkgo.It("should run Stackdriver Metadata Agent [Feature:StackdriverMetadataAgent]", func(ctx context.Context) { + f.It("should run Stackdriver Metadata Agent", feature.StackdriverMetadataAgent, func(ctx context.Context) { kubeClient = f.ClientSet testAgent(ctx, f, kubeClient) }) diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 5f65545412238..ca9dbe562b513 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -710,7 +710,7 @@ metadata: gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42)) }) - ginkgo.It("[Slow] running a failing command without --restart=Never", func(ctx context.Context) { + f.It(f.WithSlow(), "running a failing command without --restart=Never", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). Exec() @@ -723,7 +723,7 @@ metadata: } }) - ginkgo.It("[Slow] running a failing command without --restart=Never, but with --rm", func(ctx context.Context) { + f.It(f.WithSlow(), "running a failing command without --restart=Never, but with --rm", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42"). WithStdinData("abcd1234"). 
Exec() @@ -737,7 +737,7 @@ metadata: framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "failure-3", ns, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second)) }) - ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) { + f.It(f.WithSlow(), "running a failing command with --leave-stdin-open", func(ctx context.Context) { _, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42"). WithStdinData("abcd1234"). Exec() @@ -1892,7 +1892,7 @@ metadata: // This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on // it, which will affect anything else running in parallel. - ginkgo.Describe("Kubectl taint [Serial]", func() { + f.Describe("Kubectl taint", framework.WithSerial(), func() { ginkgo.It("should update the taint on a node", func(ctx context.Context) { testTaint := v1.Taint{ Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())), diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go index 75580bdf27ae2..5b27da43b3108 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_signer.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_signer.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" bootstrapapi "k8s.io/cluster-bootstrap/token/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/lifecycle" admissionapi "k8s.io/pod-security-admission/api" @@ -37,7 +38,7 @@ const ( TokenSecretBytes = 8 ) -var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { +var _ = lifecycle.SIGDescribe(feature.BootstrapTokens, func() { var c clientset.Interface @@ -70,7 +71,7 @@ var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { framework.ExpectNoError(err) }) - ginkgo.It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated [Serial][Disruptive]", func(ctx context.Context) { + f.It("should resign the bootstrap tokens when the clusterInfo ConfigMap updated", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) { ginkgo.By("create a new bootstrap token secret") tokenID, err := GenerateTokenID() framework.ExpectNoError(err) diff --git a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go index 96bde949d91dd..a9270496c6d0a 100644 --- a/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go +++ b/test/e2e/lifecycle/bootstrap/bootstrap_token_cleaner.go @@ -25,13 +25,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" bootstrapapi "k8s.io/cluster-bootstrap/token/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/lifecycle" admissionapi "k8s.io/pod-security-admission/api" ) var secretNeedClean string -var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() { +var _ = lifecycle.SIGDescribe(feature.BootstrapTokens, func() { var c clientset.Interface diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index f44de0d77e619..bbe6823a53cf4 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -316,7 +316,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.Context("Change 
stubDomain", func() { nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()} - ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func(ctx context.Context) { + framework.It("should be able to change stubDomain configuration", framework.WithSlow(), framework.WithSerial(), func(ctx context.Context) { nsTest.c = nsTest.f.ClientSet nsTest.run(ctx, framework.TestContext.ClusterIsIPv6()) }) @@ -325,7 +325,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.Context("Forward PTR lookup", func() { fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()} - ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func(ctx context.Context) { + framework.It("should forward PTR records lookup to upstream nameserver", framework.WithSlow(), framework.WithSerial(), func(ctx context.Context) { fwdTest.c = fwdTest.f.ClientSet fwdTest.run(ctx, framework.TestContext.ClusterIsIPv6()) }) @@ -334,7 +334,7 @@ var _ = common.SIGDescribe("DNS configMap nameserver", func() { ginkgo.Context("Forward external name lookup", func() { externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()} - ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func(ctx context.Context) { + framework.It("should forward externalname lookup to upstream nameserver", framework.WithSlow(), framework.WithSerial(), func(ctx context.Context) { externalNameTest.c = externalNameTest.f.ClientSet externalNameTest.run(ctx, framework.TestContext.ClusterIsIPv6()) }) diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go index 6a35b41a9db63..806ba1387dc7e 100644 --- a/test/e2e/network/dns_scale_records.go +++ b/test/e2e/network/dns_scale_records.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" "k8s.io/kubernetes/test/e2e/network/common" @@ -42,7 +43,7 @@ const ( checkServicePercent = 0.05 ) -var _ = common.SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { +var _ = common.SIGDescribe(feature.PerformanceDNS, framework.WithSerial(), func() { f := framework.NewDefaultFramework("performancedns") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/network/dual_stack.go b/test/e2e/network/dual_stack.go index ce58c80530e67..bfa1527a268b3 100644 --- a/test/e2e/network/dual_stack.go +++ b/test/e2e/network/dual_stack.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" @@ -44,7 +45,7 @@ import ( ) // Tests for ipv4-ipv6 dual-stack feature -var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { +var _ = common.SIGDescribe(feature.IPv6DualStack, func() { f := framework.NewDefaultFramework("dualstack") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -111,7 +112,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { framework.ExpectNoError(err, "failed to delete pod") }) - ginkgo.It("should create pod, add ipv6 and ipv4 ip to host ips [Feature:PodHostIPs]", func(ctx context.Context) { + f.It("should create 
pod, add ipv6 and ipv4 ip to host ips", feature.PodHostIPs, func(ctx context.Context) { podName := "pod-dualstack-ips" pod := &v1.Pod{ @@ -498,7 +499,7 @@ var _ = common.SIGDescribe("[Feature:IPv6DualStack]", func() { } }) - ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) { + f.It("should function for pod-Service: sctp", feature.SCTPConnectivity, func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableDualStack, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort)) err := config.DialFromTestContainer(ctx, "sctp", config.SecondaryClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index 9b3717d550815..d780a2c75db02 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" @@ -58,7 +59,7 @@ try: except: print('err')` -var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() { +var _ = common.SIGDescribe("ClusterDns", feature.Example, func() { f := framework.NewDefaultFramework("cluster-dns") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index cf45f88a3ef0a..9f77825895109 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -71,7 +71,7 @@ var _ = common.SIGDescribe("Firewall rule", func() { }) // This test takes around 6 minutes to run - ginkgo.It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func(ctx context.Context) { + f.It(f.WithSlow(), f.WithSerial(), "should create valid firewall rules for LoadBalancer type service", func(ctx context.Context) { ns := f.Namespace.Name // This source ranges is just used to examine we have exact same things on LB firewall rules firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"} diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 7faffa1c480bc..d7bf2f43c35df 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -35,6 +35,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2eingress "k8s.io/kubernetes/test/e2e/framework/ingress" @@ -85,7 +86,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { // // Slow by design ~10m for each "It" block dominated by loadbalancer setup time // TODO: write similar tests for nginx, haproxy and AWS Ingress. 
- ginkgo.Describe("GCE [Slow] [Feature:Ingress]", func() { + f.Describe("GCE", framework.WithSlow(), feature.Ingress, func() { var gceController *gce.IngressController // Platform specific setup @@ -130,7 +131,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7", func() { }) - ginkgo.Describe("GCE [Slow] [Feature:NEG]", func() { + f.Describe("GCE", framework.WithSlow(), feature.NEG, func() { var gceController *gce.IngressController // Platform specific setup diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go index e67922c128b8e..a1e4f43e1b6f1 100644 --- a/test/e2e/network/ingress_scale.go +++ b/test/e2e/network/ingress_scale.go @@ -19,6 +19,7 @@ package network import ( "context" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" @@ -40,7 +41,7 @@ var _ = common.SIGDescribe("Loadbalancing: L7 Scalability", func() { ns = f.Namespace.Name }) - ginkgo.Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() { + f.Describe("GCE", framework.WithSlow(), framework.WithSerial(), feature.IngressScale, func() { var ( scaleFramework *scale.IngressScaleFramework ) diff --git a/test/e2e/network/ingressclass.go b/test/e2e/network/ingressclass.go index f113e198c6aa6..93a528e3e52ee 100644 --- a/test/e2e/network/ingressclass.go +++ b/test/e2e/network/ingressclass.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/network/common" admissionapi "k8s.io/pod-security-admission/api" @@ -38,7 +39,7 @@ import ( "github.com/onsi/gomega" ) -var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { +var _ = common.SIGDescribe("IngressClass", feature.Ingress, func() { f := framework.NewDefaultFramework("ingressclass") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var cs clientset.Interface @@ -46,7 +47,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { cs = f.ClientSet }) - ginkgo.It("should set default value on new IngressClass [Serial]", func(ctx context.Context) { + f.It("should set default value on new IngressClass", f.WithSerial(), func(ctx context.Context) { ingressClass1, err := createIngressClass(ctx, cs, "ingressclass1", true, f.UniqueName) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name) @@ -83,7 +84,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { } }) - ginkgo.It("should not set default value if no default IngressClass [Serial]", func(ctx context.Context) { + f.It("should not set default value if no default IngressClass", f.WithSerial(), func(ctx context.Context) { ingressClass1, err := createIngressClass(ctx, cs, "ingressclass1", false, f.UniqueName) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name) @@ -117,7 +118,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { } }) - ginkgo.It("should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default[Serial]", func(ctx context.Context) { + f.It("should choose the one with the later CreationTimestamp, if equal the one with the lower name when two ingressClasses are marked as default", f.WithSerial(), func(ctx context.Context) { ingressClass1, err := 
createIngressClass(ctx, cs, "ingressclass1", true, f.UniqueName) framework.ExpectNoError(err) ginkgo.DeferCleanup(deleteIngressClass, cs, ingressClass1.Name) @@ -165,7 +166,7 @@ var _ = common.SIGDescribe("IngressClass [Feature:Ingress]", func() { } }) - ginkgo.It("should allow IngressClass to have Namespace-scoped parameters [Serial]", func(ctx context.Context) { + f.It("should allow IngressClass to have Namespace-scoped parameters", f.WithSerial(), func(ctx context.Context) { ingressClass := &networkingv1.IngressClass{ ObjectMeta: metav1.ObjectMeta{ Name: "ingressclass1", diff --git a/test/e2e/network/loadbalancer.go b/test/e2e/network/loadbalancer.go index cac444bc6fed0..40eff56f4d7c9 100644 --- a/test/e2e/network/loadbalancer.go +++ b/test/e2e/network/loadbalancer.go @@ -137,7 +137,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } }) - ginkgo.It("should be able to change the type and ports of a TCP service [Slow]", func(ctx context.Context) { + f.It("should be able to change the type and ports of a TCP service", f.WithSlow(), func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") @@ -330,7 +330,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { testNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) }) - ginkgo.It("should be able to change the type and ports of a UDP service [Slow]", func(ctx context.Context) { + f.It("should be able to change the type and ports of a UDP service", f.WithSlow(), func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke") @@ -524,7 +524,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { testNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) }) - ginkgo.It("should only allow access from service loadbalancer source ranges [Slow]", func(ctx context.Context) { + f.It("should only allow access from service loadbalancer source ranges", f.WithSlow(), func(ctx context.Context) { // this feature currently supported only on GCE/GKE/AWS/AZURE e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "azure") @@ -608,7 +608,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { checkReachabilityFromPod(true, loadBalancerCreateTimeout, namespace, dropPod.Name, svcIP) }) - ginkgo.It("should be able to create an internal type load balancer [Slow]", func(ctx context.Context) { + f.It("should be able to create an internal type load balancer", f.WithSlow(), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("azure", "gke", "gce") createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) @@ -738,7 +738,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [LinuxOnly]", func(ctx context.Context) { + f.It("should have session affinity work for LoadBalancer service with ESIPP on", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. e2eskipper.SkipIfProviderIs("aws") @@ -749,7 +749,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) // [LinuxOnly]: Windows does not support session affinity. 
- ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [LinuxOnly]", func(ctx context.Context) { + f.It("should be able to switch session affinity for LoadBalancer service with ESIPP on", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. e2eskipper.SkipIfProviderIs("aws") @@ -760,7 +760,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [LinuxOnly]", func(ctx context.Context) { + f.It("should have session affinity work for LoadBalancer service with ESIPP off", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. e2eskipper.SkipIfProviderIs("aws") @@ -771,7 +771,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) // [LinuxOnly]: Windows does not support session affinity. - ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [LinuxOnly]", func(ctx context.Context) { + f.It("should be able to switch session affinity for LoadBalancer service with ESIPP off", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. e2eskipper.SkipIfProviderIs("aws") @@ -787,7 +787,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { // 2. Update service to type=ClusterIP. Finalizer should be removed. // 3. Update service to type=LoadBalancer. Finalizer should be added. // 4. Delete service with type=LoadBalancer. Finalizer should be removed. - ginkgo.It("should handle load balancer cleanup finalizer for service [Slow]", func(ctx context.Context) { + f.It("should handle load balancer cleanup finalizer for service", f.WithSlow(), func(ctx context.Context) { jig := e2eservice.NewTestJig(cs, f.Namespace.Name, "lb-finalizer") ginkgo.By("Create load balancer service") @@ -819,7 +819,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { e2eservice.WaitForServiceUpdatedWithFinalizer(ctx, cs, svc.Namespace, svc.Name, true) }) - ginkgo.It("should be able to create LoadBalancer Service without NodePort and change it [Slow]", func(ctx context.Context) { + f.It("should be able to create LoadBalancer Service without NodePort and change it", f.WithSlow(), func(ctx context.Context) { // requires cloud load-balancer support e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") @@ -1203,7 +1203,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { } }) - ginkgo.It("should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster [Slow]", func(ctx context.Context) { + f.It("should not have connectivity disruption during rolling update with externalTrafficPolicy=Cluster", f.WithSlow(), func(ctx context.Context) { // We start with a low but reasonable threshold to analyze the results. // The goal is to achieve 99% minimum success rate. // TODO: We should do incremental steps toward the goal. 
@@ -1212,7 +1212,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { testRollingUpdateLBConnectivityDisruption(ctx, f, v1.ServiceExternalTrafficPolicyTypeCluster, minSuccessRate) }) - ginkgo.It("should not have connectivity disruption during rolling update with externalTrafficPolicy=Local [Slow]", func(ctx context.Context) { + f.It("should not have connectivity disruption during rolling update with externalTrafficPolicy=Local", f.WithSlow(), func(ctx context.Context) { // We start with a low but reasonable threshold to analyze the results. // The goal is to achieve 99% minimum success rate. // TODO: We should do incremental steps toward the goal. @@ -1222,7 +1222,7 @@ var _ = common.SIGDescribe("LoadBalancers", func() { }) }) -var _ = common.SIGDescribe("LoadBalancers ESIPP [Slow]", func() { +var _ = common.SIGDescribe("LoadBalancers ESIPP", framework.WithSlow(), func() { f := framework.NewDefaultFramework("esipp") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline var loadBalancerCreateTimeout time.Duration diff --git a/test/e2e/network/netpol/network_policy.go b/test/e2e/network/netpol/network_policy.go index 954c52f915005..f15aa02478904 100644 --- a/test/e2e/network/netpol/network_policy.go +++ b/test/e2e/network/netpol/network_policy.go @@ -29,6 +29,7 @@ import ( "github.com/onsi/ginkgo/v2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" @@ -116,7 +117,7 @@ var _ = common.SIGDescribe("Netpol", func() { ginkgo.Context("NetworkPolicy between server and client", func() { var k8s *kubeManager - ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should support a 'default-deny-ingress' policy", feature.NetworkPolicy, func(ctx context.Context) { // Only poll TCP protocols := []v1.Protocol{protocolTCP} @@ -143,7 +144,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should support a 'default-deny-all' policy [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should support a 'default-deny-all' policy", feature.NetworkPolicy, func(ctx context.Context) { policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules(), SetSpecEgressRules()) protocols := []v1.Protocol{protocolTCP} ports := []int32{80} @@ -158,7 +159,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy to allow traffic from pods within server namespace based on PodSelector", feature.NetworkPolicy, func(ctx context.Context) { allowedPods := metav1.LabelSelector{ MatchLabels: map[string]string{ "pod": "b", @@ -181,7 +182,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy] ", func(ctx context.Context) { + f.It("should enforce policy to allow ingress traffic for a target", feature.NetworkPolicy, func(ctx context.Context) { protocols := 
[]v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -206,7 +207,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy to allow ingress traffic from pods in all namespaces", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -220,7 +221,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -238,7 +239,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on PodSelector with MatchExpressions", feature.NetworkPolicy, func(ctx context.Context) { allowedPods := metav1.LabelSelector{ MatchExpressions: []metav1.LabelSelectorRequirement{{ Key: "pod", @@ -263,7 +264,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on NamespaceSelector with MatchExpressions", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -288,7 +289,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on PodSelector or NamespaceSelector", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -317,7 +318,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on PodSelector and NamespaceSelector", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -347,7 +348,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, 
Reachability: reachability}) }) - ginkgo.It("should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on Multiple PodSelectors and NamespaceSelectors", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -380,7 +381,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on any PodSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on any PodSelectors", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -402,7 +403,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -429,7 +430,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on Ports", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolTCP} ports := []int32{81} @@ -454,7 +455,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce multiple, stacked policies with overlapping podSelectors", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} @@ -496,7 +497,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityALLOW}) }) - ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should support allow-all policy", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("Creating a network policy which allows all traffic.") policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{})) protocols := []v1.Protocol{protocolTCP} @@ -511,7 +512,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability}) 
}) - ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should allow ingress access on one named port", feature.NetworkPolicy, func(ctx context.Context) { IngressRules := networkingv1.NetworkPolicyIngressRule{} IngressRules.Ports = append(IngressRules.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-81-tcp"}}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all", map[string]string{}, SetSpecIngressRules(IngressRules)) @@ -532,7 +533,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort80}) }) - ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should allow ingress access from namespace on one named port", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80, 81} k8s = initializeResources(ctx, f, protocols, ports) @@ -562,7 +563,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityFAIL}) }) - ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should allow egress access on one named port", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("validating egress from port 81 to port 80") egressRule := networkingv1.NetworkPolicyEgressRule{} egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80-tcp"}}) @@ -583,7 +584,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort81}) }) - ginkgo.It("should enforce updated policy [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce updated policy", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("Using the simplest possible mutation: start with allow all, then switch to deny all") // part 1) allow all policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all-mutate-to-deny-all", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{})) @@ -605,7 +606,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityDeny}) }) - ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should allow ingress access from updated namespace", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -636,7 +637,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityWithLabel}) }) - ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should allow ingress access from updated pod", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -665,7 +666,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: 
reachabilityWithLabel}) }) - ginkgo.It("should deny ingress from pods on other namespaces [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should deny ingress from pods on other namespaces", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -682,7 +683,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should deny ingress access to updated pod", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -704,7 +705,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityIsolated}) }) - ginkgo.It("should deny egress from pods based on PodSelector [Feature:NetworkPolicy] ", func(ctx context.Context) { + f.It("should deny egress from pods based on PodSelector", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -718,7 +719,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should deny egress from all pods in a namespace [Feature:NetworkPolicy] ", func(ctx context.Context) { + f.It("should deny egress from all pods in a namespace", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -732,7 +733,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should work with Ingress, Egress specified together [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should work with Ingress, Egress specified together", feature.NetworkPolicy, func(ctx context.Context) { allowedPodLabels := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "b"}} ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{PodSelector: allowedPodLabels}) @@ -770,7 +771,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort81}) }) - ginkgo.It("should support denying of egress traffic on the client side (even if the server explicitly allows this traffic) [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should support denying of egress traffic on the client side (even if the server explicitly allows this traffic)", feature.NetworkPolicy, func(ctx context.Context) { // x/a --> y/a and y/b // Egress allowed to y/a only. 
Egress to y/b should be blocked // Ingress on y/a and y/b allow traffic from x/a @@ -866,7 +867,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -892,7 +893,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Feature:UDP]", func(ctx context.Context) { + f.It("should enforce ingress policy allowing any port traffic to a server on a specific protocol", feature.NetworkPolicy, feature.UDP, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP, protocolUDP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -910,7 +911,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolUDP, Reachability: reachabilityUDP}) }) - ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{81} k8s = initializeResources(ctx, f, protocols, ports) @@ -935,7 +936,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll}) }) - ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce multiple egress policies with egress allow-all policy taking precedence", feature.NetworkPolicy, func(ctx context.Context) { egressRule := networkingv1.NetworkPolicyEgressRule{} egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80}}) policyAllowPort80 := GenNetworkPolicyWithNameAndPodMatchLabel("allow-egress-port-80", map[string]string{}, SetSpecEgressRules(egressRule)) @@ -960,7 +961,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll}) }) - ginkgo.It("should stop enforcing policies after they are deleted [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should stop enforcing policies after they are deleted", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("Creating a network policy for the server which denies all traffic.") // Deny all traffic into and out of "x". @@ -988,7 +989,7 @@ var _ = common.SIGDescribe("Netpol", func() { // TODO, figure out how the next 3 tests should work with dual stack : do we need a different abstraction then just "podIP"? 
- ginkgo.It("should allow egress access to server in CIDR block [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should allow egress access to server in CIDR block", feature.NetworkPolicy, func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR protocols := []v1.Protocol{protocolTCP} ports := []int32{80} @@ -1015,7 +1016,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce except clause while egress access to server in CIDR block", feature.NetworkPolicy, func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause protocols := []v1.Protocol{protocolTCP} ports := []int32{80} @@ -1049,7 +1050,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed", feature.NetworkPolicy, func(ctx context.Context) { // Getting podServer's status to get podServer's IP, to create the CIDR with except clause protocols := []v1.Protocol{protocolTCP} ports := []int32{80} @@ -1098,7 +1099,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityAllow}) }) - ginkgo.It("should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector", feature.NetworkPolicy, func(ctx context.Context) { /* Test steps: 1. Verify every pod in every namespace can talk to each other @@ -1135,7 +1136,7 @@ var _ = common.SIGDescribe("Netpol", func() { // This test *does* apply to plugins that do not implement SCTP. It is a // security hole if you fail this test, because you are allowing TCP // traffic that is supposed to be blocked. - ginkgo.It("should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{81} k8s = initializeResources(ctx, f, protocols, ports) @@ -1161,7 +1162,7 @@ var _ = common.SIGDescribe("Netpol", func() { // This test *does* apply to plugins that do not implement SCTP. It is a // security hole if you fail this test, because you are allowing TCP // traffic that is supposed to be blocked. 
- ginkgo.It("should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should properly isolate pods that are selected by a policy allowing SCTP, even if the plugin doesn't support SCTP", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("Creating a network policy for the server which allows traffic only via SCTP on port 80.") ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 80}, Protocol: &protocolSCTP}) @@ -1178,7 +1179,7 @@ var _ = common.SIGDescribe("Netpol", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability}) }) - ginkgo.It("should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should not allow access by TCP when a policy specifies only UDP", feature.NetworkPolicy, func(ctx context.Context) { ingressRule := networkingv1.NetworkPolicyIngressRule{} ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 81}, Protocol: &protocolUDP}) policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-only-udp-ingress-on-port-81", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule)) @@ -1197,7 +1198,7 @@ var _ = common.SIGDescribe("Netpol", func() { }) // Note that this default ns functionality is maintained by the APIMachinery group, but we test it here anyways because its an important feature. - ginkgo.It("should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -1220,7 +1221,7 @@ var _ = common.SIGDescribe("Netpol", func() { }) // Note that this default ns functionality is maintained by the APIMachinery group, but we test it here anyways because its an important feature. 
- ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on NamespaceSelector with MatchExpressions using default ns label", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolTCP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -1256,7 +1257,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ginkgo.Context("NetworkPolicy between server and client using UDP", func() { - ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should support a 'default-deny-ingress' policy", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolUDP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -1270,7 +1271,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolUDP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on Ports", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("Creating a network policy allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolUDP} ports := []int32{81} @@ -1295,7 +1296,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolUDP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolUDP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -1324,7 +1325,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() { }) }) -var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func() { +var _ = common.SIGDescribe("Netpol", feature.SCTPConnectivity, "[LinuxOnly]", func() { f := framework.NewDefaultFramework("sctp-network-policy") f.SkipNamespaceCreation = true f.NamespacePodSecurityLevel = admissionapi.LevelBaseline @@ -1336,7 +1337,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func( ginkgo.Context("NetworkPolicy between server and client using SCTP", func() { - ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should support a 'default-deny-ingress' policy", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolSCTP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) @@ -1350,7 +1351,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func( ValidateOrFail(k8s, &TestCase{ToPort: 80, Protocol: v1.ProtocolSCTP, Reachability: reachability}) }) - ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy based on Ports", feature.NetworkPolicy, func(ctx context.Context) { ginkgo.By("Creating a network allowPort81Policy which 
only allows allow listed namespaces (y) to connect on exactly one port (81)") protocols := []v1.Protocol{protocolSCTP} ports := []int32{81} @@ -1374,7 +1375,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly]", func( ValidateOrFail(k8s, &TestCase{ToPort: 81, Protocol: v1.ProtocolSCTP, Reachability: reachability}) }) - ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func(ctx context.Context) { + f.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector", feature.NetworkPolicy, func(ctx context.Context) { protocols := []v1.Protocol{protocolSCTP} ports := []int32{80} k8s = initializeResources(ctx, f, protocols, ports) diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go index 6a7a9532db451..ab1c63443db2f 100644 --- a/test/e2e/network/network_tiers.go +++ b/test/e2e/network/network_tiers.go @@ -41,7 +41,7 @@ import ( "github.com/onsi/gomega" ) -var _ = common.SIGDescribe("Services GCE [Slow]", func() { +var _ = common.SIGDescribe("Services GCE", framework.WithSlow(), func() { f := framework.NewDefaultFramework("services") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -65,7 +65,7 @@ var _ = common.SIGDescribe("Services GCE [Slow]", func() { //reset serviceLBNames serviceLBNames = []string{} }) - ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func(ctx context.Context) { + f.It("should be able to create and tear down a standard-tier load balancer", f.WithSlow(), func(ctx context.Context) { lagTimeout := e2eservice.LoadBalancerLagTimeoutDefault createTimeout := e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, cs) diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index 1bc1a5290e587..be7f99b077f24 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilwait "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cluster/ports" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -84,13 +85,13 @@ var _ = common.SIGDescribe("Networking", func() { f := framework.NewDefaultFramework(svcname) f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func(ctx context.Context) { + f.It("should provide Internet connection for containers", feature.NetworkingIPv4, func(ctx context.Context) { ginkgo.By("Running container which tries to connect to 8.8.8.8") framework.ExpectNoError( checkConnectivityToHost(ctx, f, "", "connectivity-test", "8.8.8.8", 53, 30)) }) - ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental][LinuxOnly]", func(ctx context.Context) { + f.It("should provide Internet connection for containers", feature.NetworkingIPv6, "[Experimental][LinuxOnly]", func(ctx context.Context) { // IPv6 is not supported on Windows. 
e2eskipper.SkipIfNodeOSDistroIs("windows") ginkgo.By("Running container which tries to connect to 2001:4860:4860::8888") @@ -98,7 +99,7 @@ var _ = common.SIGDescribe("Networking", func() { checkConnectivityToHost(ctx, f, "", "connectivity-test", "2001:4860:4860::8888", 53, 30)) }) - ginkgo.It("should provider Internet connection for containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) { + f.It("should provider Internet connection for containers using DNS", feature.NetworkingDNS, func(ctx context.Context) { ginkgo.By("Running container which tries to connect to google.com") framework.ExpectNoError( checkConnectivityToHost(ctx, f, "", "connectivity-test", "google.com", 80, 30)) @@ -176,7 +177,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for pod-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) { + f.It("should function for pod-Service: sctp", feature.SCTPConnectivity, func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, e2enetwork.ClusterSCTPPort)) err := config.DialFromTestContainer(ctx, "sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -218,7 +219,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for node-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) { + f.It("should function for node-Service: sctp", feature.SCTPConnectivity, func(ctx context.Context) { ginkgo.Skip("Skipping SCTP node to service test until DialFromNode supports SCTP #96482") config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, e2enetwork.ClusterSCTPPort)) @@ -262,7 +263,7 @@ var _ = common.SIGDescribe("Networking", func() { } }) - ginkgo.It("should function for endpoint-Service: sctp [Feature:SCTPConnectivity]", func(ctx context.Context) { + f.It("should function for endpoint-Service: sctp", feature.SCTPConnectivity, func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.EnableSCTP) ginkgo.By(fmt.Sprintf("dialing(sctp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, e2enetwork.ClusterSCTPPort)) err := config.DialFromEndpointContainer(ctx, "sctp", config.ClusterIP, e2enetwork.ClusterSCTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -359,7 +360,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. - ginkgo.It("should update nodePort: http [Slow]", func(ctx context.Context) { + f.It("should update nodePort: http", f.WithSlow(), func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (ctx, nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) err := config.DialFromNode(ctx, "http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -390,7 +391,7 @@ var _ = common.SIGDescribe("Networking", func() { }) // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. 
- ginkgo.It("should update nodePort: udp [Slow]", func(ctx context.Context) { + f.It("should update nodePort: udp", f.WithSlow(), func(ctx context.Context) { config := e2enetwork.NewNetworkingTestConfig(ctx, f, e2enetwork.UseHostNetwork) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP) and getting ALL host endpoints", config.NodeIP, config.NodeIP, config.NodeUDPPort)) err := config.DialFromNode(ctx, "udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) @@ -547,7 +548,7 @@ var _ = common.SIGDescribe("Networking", func() { }) - ginkgo.It("should recreate its iptables rules if they are deleted [Disruptive]", func(ctx context.Context) { + f.It("should recreate its iptables rules if they are deleted", f.WithDisruptive(), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...) e2eskipper.SkipUnlessSSHKeyPresent() @@ -634,7 +635,7 @@ var _ = common.SIGDescribe("Networking", func() { // This is [Serial] because it can't run at the same time as the // [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded. - ginkgo.It("should allow creating a Pod with an SCTP HostPort [LinuxOnly] [Serial]", func(ctx context.Context) { + f.It("should allow creating a Pod with an SCTP HostPort [LinuxOnly]", f.WithSerial(), func(ctx context.Context) { node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) framework.ExpectNoError(err) hostExec := utils.NewHostExec(f) diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index 3c1fa9718135b..62e72dd74340a 100644 --- a/test/e2e/network/networking_perf.go +++ b/test/e2e/network/networking_perf.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" @@ -137,7 +138,7 @@ func iperf2ClientDaemonSet(ctx context.Context, client clientset.Interface, name // would require n^2 tests, n^2 time, and n^2 network resources which quickly become prohibitively large // as the cluster size increases. // Finally, after collecting all data, the results are analyzed and tabulated. -var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]", func() { +var _ = common.SIGDescribe("Networking IPerf2", feature.NetworkingPerformance, func() { // this test runs iperf2: one pod as a server, and a daemonset of clients f := framework.NewDefaultFramework("network-perf") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline diff --git a/test/e2e/network/no_snat.go b/test/e2e/network/no_snat.go index 2b1a0ffba9a3f..2bb6e1fbb26af 100644 --- a/test/e2e/network/no_snat.go +++ b/test/e2e/network/no_snat.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" admissionapi "k8s.io/pod-security-admission/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -63,7 +64,7 @@ var ( // This test verifies that a Pod on each node in a cluster can talk to Pods on every other node without SNAT. // We use the [Feature:NoSNAT] tag so that most jobs will skip this test by default. 
-var _ = common.SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { +var _ = common.SIGDescribe("NoSNAT", feature.NoSNAT, framework.WithSlow(), func() { f := framework.NewDefaultFramework("no-snat-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.It("Should be able to send traffic between Pods without SNAT", func(ctx context.Context) { diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index fbede5fa22cf7..c39cbff67fe7e 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -1195,7 +1195,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames, svc.Spec.ClusterIP, servicePort)) }) - ginkgo.It("should work after restarting kube-proxy [Disruptive]", func(ctx context.Context) { + f.It("should work after restarting kube-proxy", f.WithDisruptive(), func(ctx context.Context) { kubeProxyLabelSet := map[string]string{clusterAddonLabelKey: kubeProxyLabelName} e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeProxyLabelName, cs, metav1.NamespaceSystem, kubeProxyLabelSet) @@ -1228,7 +1228,7 @@ var _ = common.SIGDescribe("Services", func() { framework.ExpectNoError(verifyServeHostnameServiceUp(ctx, cs, ns, podNames2, svc2IP, servicePort)) }) - ginkgo.It("should work after restarting apiserver [Disruptive]", func(ctx context.Context) { + f.It("should work after restarting apiserver", f.WithDisruptive(), func(ctx context.Context) { if !framework.ProviderIs("gke") { e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName}) @@ -3827,7 +3827,7 @@ var _ = common.SIGDescribe("Services", func() { // These is [Serial] because it can't run at the same time as the // [Feature:SCTPConnectivity] tests, since they may cause sctp.ko to be loaded. 
- ginkgo.It("should allow creating a basic SCTP service with pod and endpoints [LinuxOnly] [Serial]", func(ctx context.Context) { + f.It("should allow creating a basic SCTP service with pod and endpoints [LinuxOnly]", f.WithSerial(), func(ctx context.Context) { serviceName := "sctp-endpoint-test" ns := f.Namespace.Name jig := e2eservice.NewTestJig(cs, ns, serviceName) diff --git a/test/e2e/network/service_cidrs.go b/test/e2e/network/service_cidrs.go index 47059a6d4f9f5..f6aff75a4c73c 100644 --- a/test/e2e/network/service_cidrs.go +++ b/test/e2e/network/service_cidrs.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -35,7 +36,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = common.SIGDescribe("[Feature:ServiceCIDRs]", func() { +var _ = common.SIGDescribe(feature.ServiceCIDRs, func() { fr := framework.NewDefaultFramework("servicecidrs") fr.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/network/topology_hints.go b/test/e2e/network/topology_hints.go index 27383fee4c882..ed569e7071e19 100644 --- a/test/e2e/network/topology_hints.go +++ b/test/e2e/network/topology_hints.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -40,7 +41,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = common.SIGDescribe("[Feature:Topology Hints]", func() { +var _ = common.SIGDescribe(feature.TopologyHints, func() { f := framework.NewDefaultFramework("topology-hints") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/node/examples.go b/test/e2e/node/examples.go index f06d09c293666..17ea03b2a975b 100644 --- a/test/e2e/node/examples.go +++ b/test/e2e/node/examples.go @@ -30,6 +30,7 @@ import ( clientset "k8s.io/client-go/kubernetes" podutil "k8s.io/kubernetes/pkg/api/v1/pod" commonutils "k8s.io/kubernetes/test/e2e/common" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" @@ -45,7 +46,7 @@ const ( serverStartTimeout = framework.PodStartTimeout + 3*time.Minute ) -var _ = SIGDescribe("[Feature:Example]", func() { +var _ = SIGDescribe(feature.Example, func() { f := framework.NewDefaultFramework("examples") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline diff --git a/test/e2e/node/kubelet.go b/test/e2e/node/kubelet.go index eb6a69edfef0a..b93a8f8efd559 100644 --- a/test/e2e/node/kubelet.go +++ b/test/e2e/node/kubelet.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" @@ -456,7 +457,7 @@ var _ = SIGDescribe("kubelet", func() { }) // Tests for NodeLogQuery feature - ginkgo.Describe("kubectl get --raw 
\"/api/v1/nodes//proxy/logs/?query=/ [Feature:NodeLogQuery] [LinuxOnly]", func() { + f.Describe("kubectl get --raw \"/api/v1/nodes//proxy/logs/?query=/", feature.NodeLogQuery, "[LinuxOnly]", func() { var ( numNodes int nodeNames sets.String diff --git a/test/e2e/node/kubelet_perf.go b/test/e2e/node/kubelet_perf.go index b52053563cf36..230323c890702 100644 --- a/test/e2e/node/kubelet_perf.go +++ b/test/e2e/node/kubelet_perf.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" kubeletstatsv1alpha1 "k8s.io/kubelet/pkg/apis/stats/v1alpha1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -195,7 +196,7 @@ func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet } // Slow by design (1 hour) -var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { +var _ = SIGDescribe("Kubelet", framework.WithSerial(), framework.WithSlow(), func() { var nodeNames sets.String f := framework.NewDefaultFramework("kubelet-perf") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -219,7 +220,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { result := om.GetLatestRuntimeOperationErrorRate(ctx) framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result)) }) - ginkgo.Describe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() { + f.Describe("regular resource usage tracking", feature.RegularResourceUsageTracking, func() { // We assume that the scheduler will make reasonable scheduling choices // and assign ~N pods on the node. // Although we want to track N pods per node, there are N + add-on pods @@ -271,7 +272,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() { }) } }) - ginkgo.Describe("experimental resource usage tracking [Feature:ExperimentalResourceUsageTracking]", func() { + f.Describe("experimental resource usage tracking", feature.ExperimentalResourceUsageTracking, func() { density := []int{100} for i := range density { podsPerNode := density[i] diff --git a/test/e2e/node/pod_gc.go b/test/e2e/node/pod_gc.go index 3329093a771c3..bd87e7a6d4ab4 100644 --- a/test/e2e/node/pod_gc.go +++ b/test/e2e/node/pod_gc.go @@ -27,6 +27,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" + + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -35,7 +37,7 @@ import ( // This test requires that --terminated-pod-gc-threshold=100 be set on the controller manager // // Slow by design (7 min) -var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]", func() { +var _ = SIGDescribe("Pod garbage collector", feature.PodGarbageCollector, framework.WithSlow(), func() { f := framework.NewDefaultFramework("pod-garbage-collector") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.It("should handle the creation of 1000 pods", func(ctx context.Context) { diff --git a/test/e2e/node/pod_resize.go b/test/e2e/node/pod_resize.go index 786e6a1b27ef4..705fed23de0ab 100644 --- a/test/e2e/node/pod_resize.go +++ b/test/e2e/node/pod_resize.go @@ -35,6 +35,7 @@ import ( resourceapi "k8s.io/kubernetes/pkg/api/v1/resource" kubecm "k8s.io/kubernetes/pkg/kubelet/cm" + 
"k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -1682,11 +1683,11 @@ func doPodResizeSchedulerTests() { }) } -var _ = SIGDescribe("[Serial] Pod InPlace Resize Container (scheduler-focused) [Feature:InPlacePodVerticalScaling]", func() { +var _ = SIGDescribe(framework.WithSerial(), "Pod InPlace Resize Container (scheduler-focused)", feature.InPlacePodVerticalScaling, func() { doPodResizeSchedulerTests() }) -var _ = SIGDescribe("Pod InPlace Resize Container [Feature:InPlacePodVerticalScaling]", func() { +var _ = SIGDescribe("Pod InPlace Resize Container", feature.InPlacePodVerticalScaling, func() { doPodResizeTests() doPodResizeResourceQuotaTests() doPodResizeErrorTests() diff --git a/test/e2e/node/runtimeclass.go b/test/e2e/node/runtimeclass.go index cc33b951ac3d8..bf65dacae43a0 100644 --- a/test/e2e/node/runtimeclass.go +++ b/test/e2e/node/runtimeclass.go @@ -67,7 +67,7 @@ var _ = SIGDescribe("RuntimeClass", func() { } }) - ginkgo.It("should run a Pod requesting a RuntimeClass with scheduling with taints [Serial] ", func(ctx context.Context) { + f.It("should run a Pod requesting a RuntimeClass with scheduling with taints", f.WithSerial(), func(ctx context.Context) { labelFooName := "foo-" + string(uuid.NewUUID()) labelFizzName := "fizz-" + string(uuid.NewUUID()) diff --git a/test/e2e/node/taints.go b/test/e2e/node/taints.go index 8e7416fa3207c..663173e03bf2a 100644 --- a/test/e2e/node/taints.go +++ b/test/e2e/node/taints.go @@ -161,7 +161,7 @@ const ( // - lack of eviction of tolerating pods from a tainted node, // - delayed eviction of short-tolerating pod from a tainted node, // - lack of eviction of short-tolerating pod after taint removal. -var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { +var _ = SIGDescribe("NoExecuteTaintManager Single Pod", framework.WithSerial(), func() { var cs clientset.Interface var ns string f := framework.NewDefaultFramework("taint-single-pod") @@ -287,7 +287,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { Description: The Pod with toleration timeout scheduled on a tainted Node MUST not be evicted if the taint is removed before toleration time ends. */ - framework.ConformanceIt("removing taint cancels eviction [Disruptive]", func(ctx context.Context) { + framework.ConformanceIt("removing taint cancels eviction", f.WithDisruptive(), func(ctx context.Context) { podName := "taint-eviction-4" pod := createPodForTaintsTest(true, 2*additionalWaitPerDeleteSeconds, podName, podName, ns) observedDeletions := make(chan string, 100) @@ -370,7 +370,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { }) }) -var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { +var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods", framework.WithSerial(), func() { var cs clientset.Interface var ns string f := framework.NewDefaultFramework("taint-multiple-pods") @@ -447,7 +447,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { Description: In a multi-pods scenario with tolerationSeconds, the pods MUST be evicted as per the toleration time limit. 
*/ - framework.ConformanceIt("evicts pods with minTolerationSeconds [Disruptive]", func(ctx context.Context) { + framework.ConformanceIt("evicts pods with minTolerationSeconds", f.WithDisruptive(), func(ctx context.Context) { podGroup := "taint-eviction-b" observedDeletions := make(chan string, 100) createTestController(ctx, cs, observedDeletions, podGroup, ns) diff --git a/test/e2e/nodefeature/nodefeature.go b/test/e2e/nodefeature/nodefeature.go index ed0b3f091b32d..42c073a66a761 100644 --- a/test/e2e/nodefeature/nodefeature.go +++ b/test/e2e/nodefeature/nodefeature.go @@ -42,11 +42,11 @@ var ( NodeProblemDetector = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("NodeProblemDetector")) OOMScoreAdj = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("OOMScoreAdj")) PodDisruptionConditions = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodDisruptionConditions")) + PodHostIPs = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodHostIPs")) PodResources = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("PodResources")) ResourceMetrics = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("ResourceMetrics")) RuntimeHandler = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("RuntimeHandler")) SystemNodeCriticalPod = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("SystemNodeCriticalPod")) - TopologyManager = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("TopologyManager")) ) func init() { diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index d5ea5b9c57522..9dc4bc7316b3c 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edebug "k8s.io/kubernetes/test/e2e/framework/debug" e2egpu "k8s.io/kubernetes/test/e2e/framework/gpu" @@ -221,7 +222,7 @@ func logContainers(ctx context.Context, f *framework.Framework, pod *v1.Pod) { } } -var _ = SIGDescribe("[Feature:GPUDevicePlugin]", func() { +var _ = SIGDescribe(feature.GPUDevicePlugin, func() { f := framework.NewDefaultFramework("device-plugin-gpus") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.It("run Nvidia GPU Device Plugin tests", func(ctx context.Context) { @@ -320,7 +321,7 @@ func podNames(pods []v1.Pod) []string { return originalPodNames } -var _ = SIGDescribe("GPUDevicePluginAcrossRecreate [Feature:Recreate]", func() { +var _ = SIGDescribe("GPUDevicePluginAcrossRecreate", feature.Recreate, func() { ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessProviderIs("gce", "gke") }) diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go index 357cc8260e2f7..3761cde4a2201 100644 --- a/test/e2e/scheduling/predicates.go +++ b/test/e2e/scheduling/predicates.go @@ -34,6 +34,7 @@ import ( utilversion "k8s.io/apimachinery/pkg/util/version" clientset "k8s.io/client-go/kubernetes" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eruntimeclass "k8s.io/kubernetes/test/e2e/framework/node/runtimeclass" @@ -79,7 +80,7 @@ type pausePodConfig struct { SchedulingGates []v1.PodSchedulingGate } -var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { +var _ = SIGDescribe("SchedulerPredicates", 
framework.WithSerial(), func() { var cs clientset.Interface var nodeList *v1.NodeList var RCName string @@ -126,7 +127,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() { // This test verifies we don't allow scheduling of pods in a way that sum of local ephemeral storage resource requests of pods is greater than machines capacity. // It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods. // It is so because we need to have precise control on what's running in the cluster. - ginkgo.It("validates local ephemeral storage resource limits of pods that are allowed to run [Feature:LocalStorageCapacityIsolation]", func(ctx context.Context) { + f.It("validates local ephemeral storage resource limits of pods that are allowed to run", feature.LocalStorageCapacityIsolation, func(ctx context.Context) { e2eskipper.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery()) diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index d1883d23ba57c..e01f195633c6d 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -64,7 +64,7 @@ const ( testFinalizer = "example.com/test-finalizer" ) -var _ = SIGDescribe("SchedulerPreemption [Serial]", func() { +var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() { var cs clientset.Interface var nodeList *v1.NodeList var ns string diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index bc1f98c85b285..21f83d3978437 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -86,7 +86,7 @@ func nodesAreTooUtilized(ctx context.Context, cs clientset.Interface, nodeList * } // This test suite is used to verifies scheduler priority functions based on the default provider -var _ = SIGDescribe("SchedulerPriorities [Serial]", func() { +var _ = SIGDescribe("SchedulerPriorities", framework.WithSerial(), func() { var cs clientset.Interface var nodeList *v1.NodeList var systemPodsNo int diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go index 73d5cacd0d1cb..86a07ffa2dce6 100644 --- a/test/e2e/scheduling/ubernetes_lite.go +++ b/test/e2e/scheduling/ubernetes_lite.go @@ -68,11 +68,11 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() { err = createBalancedPodForNodes(ctx, f, cs, f.Namespace.Name, nodeList.Items, podRequestedResource, 0.0) framework.ExpectNoError(err) }) - ginkgo.It("should spread the pods of a service across zones [Serial]", func(ctx context.Context) { + f.It("should spread the pods of a service across zones", f.WithSerial(), func(ctx context.Context) { SpreadServiceOrFail(ctx, f, 5*zoneCount, zoneNames, imageutils.GetPauseImageName()) }) - ginkgo.It("should spread the pods of a replication controller across zones [Serial]", func(ctx context.Context) { + f.It("should spread the pods of a replication controller across zones", f.WithSerial(), func(ctx context.Context) { SpreadRCOrFail(ctx, f, int32(5*zoneCount), zoneNames, framework.ServeHostnameImage, []string{"serve-hostname"}) }) }) diff --git a/test/e2e/storage/csi_mock/csi_attach_volume.go b/test/e2e/storage/csi_mock/csi_attach_volume.go index 3d6de13990303..9b263ea6cee32 100644 --- a/test/e2e/storage/csi_mock/csi_attach_volume.go +++ b/test/e2e/storage/csi_mock/csi_attach_volume.go @@ -109,7 +109,7 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() { }) ginkgo.Context("CSI CSIDriver deployment after pod creation using 
non-attachable mock driver", func() { - ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func(ctx context.Context) { + f.It("should bringup pod after deploying CSIDriver attach=false", f.WithSlow(), func(ctx context.Context) { var err error m.init(ctx, testParameters{registerDriver: false, disableAttach: true}) ginkgo.DeferCleanup(m.cleanup) diff --git a/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go b/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go index 36ca8a63a690b..fbfe6cc2b2b17 100644 --- a/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go +++ b/test/e2e/storage/csi_mock/csi_node_stage_error_cases.go @@ -39,7 +39,7 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() { f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged m := newMockDriverSetup(f) - ginkgo.Context("CSI NodeStage error cases [Slow]", func() { + f.Context("CSI NodeStage error cases", f.WithSlow(), func() { trackedCalls := []string{ "NodeStageVolume", "NodeUnstageVolume", @@ -205,7 +205,7 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() { } }) - ginkgo.Context("CSI NodeUnstage error cases [Slow]", func() { + f.Context("CSI NodeUnstage error cases", f.WithSlow(), func() { trackedCalls := []string{ "NodeStageVolume", "NodeUnstageVolume", diff --git a/test/e2e/storage/csi_mock/csi_selinux_mount.go b/test/e2e/storage/csi_mock/csi_selinux_mount.go index ceede0dbc2bef..cd5cebaa7ab92 100644 --- a/test/e2e/storage/csi_mock/csi_selinux_mount.go +++ b/test/e2e/storage/csi_mock/csi_selinux_mount.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/kubelet/events" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eevents "k8s.io/kubernetes/test/e2e/framework/events" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" @@ -45,7 +46,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() { f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged m := newMockDriverSetup(f) - ginkgo.Context("SELinuxMount [LinuxOnly][Feature:SELinux]", func() { + f.Context("SELinuxMount [LinuxOnly]", feature.SELinux, func() { // Make sure all options are set so system specific defaults are not used. seLinuxOpts1 := v1.SELinuxOptions{ User: "system_u", @@ -250,7 +251,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics", func() { m := newMockDriverSetup(f) // [Serial]: the tests read global kube-controller-manager metrics, so no other test changes them in parallel. - ginkgo.Context("SELinuxMount metrics [LinuxOnly][Feature:SELinux][Feature:SELinuxMountReadWriteOncePod][Serial]", func() { + f.Context("SELinuxMount metrics [LinuxOnly]", feature.SELinux, feature.SELinuxMountReadWriteOncePod, f.WithSerial(), func() { // All SELinux metrics. Unless explicitly mentioned in test.expectIncreases, these metrics must not grow during // a test. 
diff --git a/test/e2e/storage/csi_mock/csi_snapshot.go b/test/e2e/storage/csi_mock/csi_snapshot.go index f96c36932ca4f..60c3f4589c39d 100644 --- a/test/e2e/storage/csi_mock/csi_snapshot.go +++ b/test/e2e/storage/csi_mock/csi_snapshot.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -45,7 +46,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged m := newMockDriverSetup(f) - ginkgo.Context("CSI Volume Snapshots [Feature:VolumeSnapshotDataSource]", func() { + f.Context("CSI Volume Snapshots", feature.VolumeSnapshotDataSource, func() { tests := []struct { name string createSnapshotHook func(counter int64) error @@ -172,7 +173,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { } }) - ginkgo.Context("CSI Volume Snapshots secrets [Feature:VolumeSnapshotDataSource]", func() { + f.Context("CSI Volume Snapshots secrets", feature.VolumeSnapshotDataSource, func() { var ( // CSISnapshotterSecretName is the name of the secret to be created @@ -281,7 +282,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() { } }) - ginkgo.Context("CSI Snapshot Controller metrics [Feature:VolumeSnapshotDataSource]", func() { + f.Context("CSI Snapshot Controller metrics", feature.VolumeSnapshotDataSource, func() { tests := []struct { name string pattern storageframework.TestPattern diff --git a/test/e2e/storage/csi_mock/csi_volume_expansion.go b/test/e2e/storage/csi_mock/csi_volume_expansion.go index 6e7330d782793..24e135af51be1 100644 --- a/test/e2e/storage/csi_mock/csi_volume_expansion.go +++ b/test/e2e/storage/csi_mock/csi_volume_expansion.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/drivers" @@ -396,7 +397,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() { } }) - ginkgo.Context("Expansion with recovery[Feature:RecoverVolumeExpansionFailure]", func() { + f.Context("Expansion with recovery", feature.RecoverVolumeExpansionFailure, func() { tests := []recoveryTest{ { name: "should record target size in allocated resources", diff --git a/test/e2e/storage/csi_mock/csi_volume_limit.go b/test/e2e/storage/csi_mock/csi_volume_limit.go index e990faf6536ed..b3799d5af3cb6 100644 --- a/test/e2e/storage/csi_mock/csi_volume_limit.go +++ b/test/e2e/storage/csi_mock/csi_volume_limit.go @@ -40,7 +40,7 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { m := newMockDriverSetup(f) ginkgo.Context("CSI volume limit information using mock driver", func() { - ginkgo.It("should report attach limit when limit is bigger than 0 [Slow]", func(ctx context.Context) { + f.It("should report attach limit when limit is bigger than 0", f.WithSlow(), func(ctx context.Context) { // define volume limit to be 2 for this test var err error m.init(ctx, testParameters{attachLimit: 2}) @@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod3) }) - 
ginkgo.It("should report attach limit for generic ephemeral volume when persistent volume is attached [Slow]", func(ctx context.Context) { + f.It("should report attach limit for generic ephemeral volume when persistent volume is attached", f.WithSlow(), func(ctx context.Context) { // define volume limit to be 2 for this test var err error m.init(ctx, testParameters{attachLimit: 1}) @@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() { framework.ExpectNoError(err, "while waiting for max volume condition on pod : %+v", pod2) }) - ginkgo.It("should report attach limit for persistent volume when generic ephemeral volume is attached [Slow]", func(ctx context.Context) { + f.It("should report attach limit for persistent volume when generic ephemeral volume is attached", f.WithSlow(), func(ctx context.Context) { // define volume limit to be 2 for this test var err error m.init(ctx, testParameters{attachLimit: 1}) diff --git a/test/e2e/storage/detach_mounted.go b/test/e2e/storage/detach_mounted.go index d21cb177f27a5..130d809e8ceb3 100644 --- a/test/e2e/storage/detach_mounted.go +++ b/test/e2e/storage/detach_mounted.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -44,7 +45,7 @@ var ( durationForStuckMount = 110 * time.Second ) -var _ = utils.SIGDescribe("[Feature:Flexvolumes] Detaching volumes", func() { +var _ = utils.SIGDescribe(feature.Flexvolumes, "Detaching volumes", func() { f := framework.NewDefaultFramework("flexvolume") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -69,7 +70,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Detaching volumes", func() { suffix = ns.Name }) - ginkgo.It("should not work when mount is in progress [Slow]", func(ctx context.Context) { + f.It("should not work when mount is in progress", f.WithSlow(), func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() driver := "attachable-with-long-mount" diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index 9dd75ef6dade3..ff6fcb30b36d3 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -185,7 +185,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { Testname: EmptyDir Wrapper Volume, ConfigMap volumes, no race Description: Create 50 ConfigMaps Volumes and 5 replicas of pod with these ConfigMapvolumes mounted. Pod MUST NOT fail waiting for Volumes. */ - framework.ConformanceIt("should not cause race condition when used for configmaps [Serial]", func(ctx context.Context) { + framework.ConformanceIt("should not cause race condition when used for configmaps", f.WithSerial(), func(ctx context.Context) { configMapNames := createConfigmapsForRace(ctx, f) ginkgo.DeferCleanup(deleteConfigMaps, f, configMapNames) volumes, volumeMounts := makeConfigMapVolumes(configMapNames) @@ -198,7 +198,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { // This test uses deprecated GitRepo VolumeSource so it MUST not be promoted to Conformance. // To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. 
// This projected volume maps approach can also be tested with secrets and downwardapi VolumeSource but are less prone to the race problem. - ginkgo.It("should not cause race condition when used for git_repo [Serial] [Slow]", func(ctx context.Context) { + f.It("should not cause race condition when used for git_repo", f.WithSerial(), f.WithSlow(), func(ctx context.Context) { gitURL, gitRepo, cleanup := createGitServer(ctx, f) defer cleanup() volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo) diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index 258c2e6f6c2bc..2b8c0d2c43b5b 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -27,6 +27,7 @@ import ( "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -200,7 +201,7 @@ var _ = utils.SIGDescribe("Flexvolumes", func() { uninstallFlex(ctx, cs, node, "k8s", driverInstallAs) }) - ginkgo.It("should be mountable when attachable [Feature:Flexvolumes]", func(ctx context.Context) { + f.It("should be mountable when attachable", feature.Flexvolumes, func(ctx context.Context) { driver := "dummy-attachable" driverInstallAs := driver + "-" + suffix diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index 947794ae12d3e..6c373f01257d3 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -46,7 +47,7 @@ const ( totalResizeWaitPeriod = 10 * time.Minute ) -var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]", func() { +var _ = utils.SIGDescribe(feature.Flexvolumes, "Mounted flexvolume expand", framework.WithSlow(), func() { var ( c clientset.Interface ns string diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go index de596e42a6697..a73f02bb5fda8 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -40,7 +41,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expand [Slow]", func() { +var _ = utils.SIGDescribe(feature.Flexvolumes, "Mounted flexvolume volume expand", framework.WithSlow(), func() { var ( c clientset.Interface ns string diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go index 300a92950457c..8ac6e1648a24f 100644 --- a/test/e2e/storage/generic_persistent_volume-disruptive.go +++ b/test/e2e/storage/generic_persistent_volume-disruptive.go 
@@ -34,7 +34,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = utils.SIGDescribe("GenericPersistentVolume[Disruptive]", func() { +var _ = utils.SIGDescribe("GenericPersistentVolume", framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("generic-disruptive-pv") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/host_path_type.go b/test/e2e/storage/host_path_type.go index 4f3e592727741..5b1a815499501 100644 --- a/test/e2e/storage/host_path_type.go +++ b/test/e2e/storage/host_path_type.go @@ -37,7 +37,7 @@ import ( "github.com/onsi/gomega" ) -var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() { +var _ = utils.SIGDescribe("HostPathType Directory", framework.WithSlow(), func() { f := framework.NewDefaultFramework("host-path-type-directory") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("HostPathType Directory [Slow]", func() { }) }) -var _ = utils.SIGDescribe("HostPathType File [Slow]", func() { +var _ = utils.SIGDescribe("HostPathType File", framework.WithSlow(), func() { f := framework.NewDefaultFramework("host-path-type-file") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -173,7 +173,7 @@ var _ = utils.SIGDescribe("HostPathType File [Slow]", func() { }) }) -var _ = utils.SIGDescribe("HostPathType Socket [Slow]", func() { +var _ = utils.SIGDescribe("HostPathType Socket", framework.WithSlow(), func() { f := framework.NewDefaultFramework("host-path-type-socket") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -239,7 +239,7 @@ var _ = utils.SIGDescribe("HostPathType Socket [Slow]", func() { }) }) -var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() { +var _ = utils.SIGDescribe("HostPathType Character Device", framework.WithSlow(), func() { f := framework.NewDefaultFramework("host-path-type-char-dev") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -309,7 +309,7 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() { }) }) -var _ = utils.SIGDescribe("HostPathType Block Device [Slow]", func() { +var _ = utils.SIGDescribe("HostPathType Block Device", framework.WithSlow(), func() { f := framework.NewDefaultFramework("host-path-type-block-dev") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index f31391b562b70..6f3cefd794467 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/client/conditions" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -44,7 +45,7 @@ import ( "k8s.io/kubernetes/test/e2e/storage/utils" ) -var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", func() { +var _ = utils.SIGDescribe("Mounted volume expand", feature.StorageProvider, func() { var ( c clientset.Interface ns string diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 391142edc0744..d5577b06c6ae5 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -75,7 +75,7 @@ func 
checkForControllerManagerHealthy(ctx context.Context, duration time.Duratio return nil } -var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { +var _ = utils.SIGDescribe("NFSPersistentVolumes", framework.WithDisruptive(), "[Flaky]", func() { f := framework.NewDefaultFramework("disruptive-pv") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/storage/non_graceful_node_shutdown.go b/test/e2e/storage/non_graceful_node_shutdown.go index 2563c5d7c92f8..960123239b606 100644 --- a/test/e2e/storage/non_graceful_node_shutdown.go +++ b/test/e2e/storage/non_graceful_node_shutdown.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -57,7 +58,7 @@ This test performs the following: - Removes the `out-of-service` taint from the node. */ -var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [LinuxOnly] NonGracefulNodeShutdown", func() { +var _ = utils.SIGDescribe(feature.NodeOutOfServiceVolumeDetach, framework.WithDisruptive(), "[LinuxOnly] NonGracefulNodeShutdown", func() { var ( c clientset.Interface ns string diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index ea7f4b1dd19e2..7a353a7a943c0 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -36,6 +36,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -57,7 +58,7 @@ const ( minNodes = 2 ) -var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { +var _ = utils.SIGDescribe("Pod Disks", feature.StorageProvider, func() { var ( ns string cs clientset.Interface @@ -89,7 +90,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name) }) - ginkgo.Context("schedule pods each with a PD, delete pod and verify detach [Slow]", func() { + f.Context("schedule pods each with a PD, delete pod and verify detach", f.WithSlow(), func() { const ( podDefaultGrace = "default (30s)" podImmediateGrace = "immediate (0s)" @@ -227,7 +228,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { } }) - ginkgo.Context("schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession [Slow]", func() { + f.Context("schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession", f.WithSlow(), func() { type testT struct { numContainers int numPDs int @@ -316,7 +317,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { } }) - ginkgo.Context("detach in a disrupted environment [Slow] [Disruptive]", func() { + f.Context("detach in a disrupted environment", f.WithSlow(), f.WithDisruptive(), func() { const ( deleteNode = 1 // delete physical node deleteNodeObj = 2 // delete node's api object only @@ -455,7 +456,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() { // This test is marked to run as serial so as device selection on AWS does not // 
conflict with other concurrent attach operations. - ginkgo.It("[Serial] attach on previously attached volumes should work", func(ctx context.Context) { + f.It(f.WithSerial(), "attach on previously attached volumes should work", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws") ginkgo.By("creating PD") diskName, err := e2epv.CreatePDWithRetry(ctx) diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index f34926a111d44..c547cdaa6c347 100644 --- a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/providers/gce" @@ -59,7 +60,7 @@ func initializeGCETestSpec(ctx context.Context, c clientset.Interface, t *framew } // Testing configurations of single a PV/PVC pair attached to a GCE PD -var _ = utils.SIGDescribe("PersistentVolumes GCEPD [Feature:StorageProvider]", func() { +var _ = utils.SIGDescribe("PersistentVolumes GCEPD", feature.StorageProvider, func() { var ( c clientset.Interface diskName string diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index 1583ed5feb158..20d9c4ac66d23 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -186,14 +186,13 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() { // New variable required for gingko test closures testVolType := tempTestVolType - serialStr := "" + args := []interface{}{fmt.Sprintf("[Volume type: %s]", testVolType)} if testVolType == GCELocalSSDVolumeType { - serialStr = " [Serial]" + args = append(args, framework.WithSerial()) } - ctxString := fmt.Sprintf("[Volume type: %s]%v", testVolType, serialStr) testMode := immediateMode - ginkgo.Context(ctxString, func() { + args = append(args, func() { var testVol *localTestVolume ginkgo.BeforeEach(func(ctx context.Context) { @@ -276,14 +275,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() { } }) - ginkgo.It("should set fsGroup for one pod [Slow]", func(ctx context.Context) { + f.It("should set fsGroup for one pod", f.WithSlow(), func(ctx context.Context) { ginkgo.By("Checking fsGroup is set") pod := createPodWithFsGroupTest(ctx, config, testVol, 1234, 1234) ginkgo.By("Deleting pod") e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod.Name) }) - ginkgo.It("should set same fsGroup for two pods simultaneously [Slow]", func(ctx context.Context) { + f.It("should set same fsGroup for two pods simultaneously", f.WithSlow(), func(ctx context.Context) { fsGroup := int64(1234) ginkgo.By("Create first pod and check fsGroup is set") pod1 := createPodWithFsGroupTest(ctx, config, testVol, fsGroup, fsGroup) @@ -309,11 +308,11 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() { e2epod.DeletePodOrFail(ctx, config.client, config.ns, pod2.Name) }) }) - }) + f.Context(args...) 
} - ginkgo.Context("Local volume that cannot be mounted [Slow]", func() { + f.Context("Local volume that cannot be mounted", f.WithSlow(), func() { // TODO: // - check for these errors in unit tests instead ginkgo.It("should fail due to non-existent path", func(ctx context.Context) { @@ -393,7 +392,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() { }) }) - ginkgo.Context("StatefulSet with pod affinity [Slow]", func() { + f.Context("StatefulSet with pod affinity", f.WithSlow(), func() { var testVols map[string][]*localTestVolume const ( ssReplicas = 3 @@ -450,7 +449,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local", func() { }) }) - ginkgo.Context("Stress with local volumes [Serial]", func() { + f.Context("Stress with local volumes", f.WithSerial(), func() { var ( allLocalVolumes = make(map[string][]*localTestVolume) volType = TmpfsLocalVolumeType diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 8571da30b4985..3bbcaad53cb7d 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/util/retry" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -212,7 +213,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { }) // Create new PV without claim, verify it's in Available state and LastPhaseTransitionTime is set. - ginkgo.It("create a PV: test phase transition timestamp is set and phase is Available [Feature:PersistentVolumeLastPhaseTransitionTime]", func(ctx context.Context) { + f.It("create a PV: test phase transition timestamp is set and phase is Available", feature.PersistentVolumeLastPhaseTransitionTime, func(ctx context.Context) { pvObj := e2epv.MakePersistentVolume(pvConfig) pv, err = e2epv.CreatePV(ctx, c, f.Timeouts, pvObj) framework.ExpectNoError(err) @@ -231,7 +232,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create PV and pre-bound PVC that matches the PV, verify that when PV and PVC bind // the LastPhaseTransitionTime filed of the PV is updated. - ginkgo.It("create a PV and a pre-bound PVC: test phase transition timestamp is set [Feature:PersistentVolumeLastPhaseTransitionTime]", func(ctx context.Context) { + f.It("create a PV and a pre-bound PVC: test phase transition timestamp is set", feature.PersistentVolumeLastPhaseTransitionTime, func(ctx context.Context) { pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) @@ -251,7 +252,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create PV and pre-bound PVC that matches the PV, verify that when PV and PVC bind // the LastPhaseTransitionTime field of the PV is set, then delete the PVC to change PV phase to // released and validate PV LastPhaseTransitionTime correctly updated timestamp. 
- ginkgo.It("create a PV and a pre-bound PVC: test phase transition timestamp multiple updates [Feature:PersistentVolumeLastPhaseTransitionTime]", func(ctx context.Context) { + f.It("create a PV and a pre-bound PVC: test phase transition timestamp multiple updates", feature.PersistentVolumeLastPhaseTransitionTime, func(ctx context.Context) { pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true) framework.ExpectNoError(err) @@ -331,7 +332,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { // Create 4 PVs and 2 PVCs. // Note: PVs are created before claims and no pre-binding. - ginkgo.It("should create 4 PVs and 2 PVCs: test write access [Slow]", func(ctx context.Context) { + f.It("should create 4 PVs and 2 PVCs: test write access", f.WithSlow(), func(ctx context.Context) { numPVs, numPVCs := 4, 2 pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig) framework.ExpectNoError(err) @@ -798,7 +799,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { e2estatefulset.DeleteAllStatefulSets(ctx, c, ns) }) - ginkgo.It("should be reschedulable [Slow]", func(ctx context.Context) { + f.It("should be reschedulable", f.WithSlow(), func(ctx context.Context) { // Only run on providers with default storageclass e2epv.SkipIfNoDefaultStorageClass(ctx, c) diff --git a/test/e2e/storage/pvc_storageclass.go b/test/e2e/storage/pvc_storageclass.go index a262666e8f336..c60972edbe423 100644 --- a/test/e2e/storage/pvc_storageclass.go +++ b/test/e2e/storage/pvc_storageclass.go @@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() { } }) - ginkgo.Describe("Retroactive StorageClass assignment [Serial][Disruptive]", func() { + f.Describe("Retroactive StorageClass assignment", framework.WithSerial(), framework.WithDisruptive(), func() { ginkgo.It("should assign default SC to PVCs that have no SC set", func(ctx context.Context) { // Temporarily set all default storage classes as non-default diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 787672e9e825c..600896fe7019e 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -76,25 +76,25 @@ var _ = utils.SIGDescribe("Regional PD", func() { }) ginkgo.Describe("RegionalPD", func() { - ginkgo.It("should provision storage [Slow]", func(ctx context.Context) { + f.It("should provision storage", f.WithSlow(), func(ctx context.Context) { testVolumeProvisioning(ctx, c, f.Timeouts, ns) }) - ginkgo.It("should provision storage with delayed binding [Slow]", func(ctx context.Context) { + f.It("should provision storage with delayed binding", f.WithSlow(), func(ctx context.Context) { testRegionalDelayedBinding(ctx, c, ns, 1 /* pvcCount */) testRegionalDelayedBinding(ctx, c, ns, 3 /* pvcCount */) }) - ginkgo.It("should provision storage in the allowedTopologies [Slow]", func(ctx context.Context) { + f.It("should provision storage in the allowedTopologies", f.WithSlow(), func(ctx context.Context) { testRegionalAllowedTopologies(ctx, c, ns) }) - ginkgo.It("should provision storage in the allowedTopologies with delayed binding [Slow]", func(ctx context.Context) { + f.It("should provision storage in the allowedTopologies with delayed binding", f.WithSlow(), func(ctx context.Context) { testRegionalAllowedTopologiesWithDelayedBinding(ctx, c, ns, 1 /* pvcCount */) testRegionalAllowedTopologiesWithDelayedBinding(ctx, c, ns, 3 /* pvcCount */) }) - ginkgo.It("should failover to a different zone when all 
nodes in one zone become unreachable [Slow] [Disruptive]", func(ctx context.Context) { + f.It("should failover to a different zone when all nodes in one zone become unreachable", f.WithSlow(), f.WithDisruptive(), func(ctx context.Context) { testZonalFailover(ctx, c, ns) }) }) diff --git a/test/e2e/storage/static_pods.go b/test/e2e/storage/static_pods.go index badc9033dcc5f..395919f97327b 100644 --- a/test/e2e/storage/static_pods.go +++ b/test/e2e/storage/static_pods.go @@ -28,6 +28,7 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" @@ -42,7 +43,7 @@ import ( // NOTE: these tests *require* the cluster under test to be Kubernetes In Docker (kind)! // Kind runs its API server as a static Pod, and we leverage it here // to test kubelet starting without the API server. -var _ = utils.SIGDescribe("StaticPods [Feature:Kind]", func() { +var _ = utils.SIGDescribe("StaticPods", feature.Kind, func() { f := framework.NewDefaultFramework("static-pods-csi") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -61,7 +62,7 @@ var _ = utils.SIGDescribe("StaticPods [Feature:Kind]", func() { // Test https://github.com/kubernetes/kubernetes/issues/117745 // I.e. kubelet starts and it must start the API server as a static pod, // while there is a CSI volume mounted by the previous kubelet. - ginkgo.It("should run after kubelet stopped with CSI volume mounted [Disruptive][Serial]", func(ctx context.Context) { + f.It("should run after kubelet stopped with CSI volume mounted", f.WithDisruptive(), f.WithSerial(), func(ctx context.Context) { var timeout int64 = 5 ginkgo.By("Provision a new CSI volume") diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go index b6e14500d02f9..9a7d5c80427d8 100644 --- a/test/e2e/storage/testsuites/disruptive.go +++ b/test/e2e/storage/testsuites/disruptive.go @@ -23,6 +23,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/errors" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -207,26 +208,26 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa } multiplePodTests := []multiplePodTest{ { - testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]", + testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns", runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1) }, }, { - testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns [Feature:SELinux]", + testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns", runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, 
f, pod1, true, false, pod2, e2epod.VolumeMountPath1) }, }, { - testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]", + testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns", changeSELinuxContexts: true, runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1) }, }, { - testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinux]", + testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns", changeSELinuxContexts: true, runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) { storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1) @@ -237,7 +238,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa for _, test := range multiplePodTests { func(t multiplePodTest) { if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil { - ginkgo.It(t.testItStmt, func(ctx context.Context) { + f.It(t.testItStmt, feature.SELinux, func(ctx context.Context) { init(ctx, []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}) ginkgo.DeferCleanup(cleanup) diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index 8f3263c8fb9b9..8c4363f050145 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -51,7 +51,8 @@ var _ storageframework.TestSuite = &multiVolumeTestSuite{} func InitCustomMultiVolumeTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite { return &multiVolumeTestSuite{ tsInfo: storageframework.TestSuiteInfo{ - Name: "multiVolume [Slow]", + Name: "multiVolume", + TestTags: []interface{}{framework.WithSlow()}, TestPatterns: patterns, SupportedSizeRange: e2evolume.SizeRange{ Min: "1Mi", @@ -370,7 +371,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] // | | <- same volume mode // [volume1] -> [cloned volume1] - ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func(ctx context.Context) { + f.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly]", feature.VolumeSourceXFS, func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index a89af2cffa61d..d767e358135f5 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -39,6 +39,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -199,7 +200,7 @@ func 
(p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func(ctx context.Context) { + f.It("should provision storage with snapshot data source", feature.VolumeSnapshotDataSource, func(ctx context.Context) { if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] { e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name) } @@ -235,7 +236,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]", func(ctx context.Context) { + f.It("should provision storage with snapshot data source (ROX mode)", feature.VolumeSnapshotDataSource, func(ctx context.Context) { if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] { e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name) } @@ -277,7 +278,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision storage with any volume data source [Serial]", func(ctx context.Context) { + f.It("should provision storage with any volume data source", f.WithSerial(), func(ctx context.Context) { if len(dInfo.InTreePluginName) != 0 { e2eskipper.Skipf("AnyVolumeDataSource feature only works with CSI drivers - skipping") } @@ -449,7 +450,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision correct filesystem size when restoring snapshot to larger size pvc [Feature:VolumeSnapshotDataSource]", func(ctx context.Context) { + f.It("should provision correct filesystem size when restoring snapshot to larger size pvc", feature.VolumeSnapshotDataSource, func(ctx context.Context) { //TODO: remove skip when issue is resolved - https://github.com/kubernetes/kubernetes/issues/113359 if framework.NodeOSDistroIs("windows") { e2eskipper.Skipf("Test is not valid Windows - skipping") @@ -612,7 +613,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func(ctx context.Context) { + f.It("should provision storage with pvc data source in parallel", f.WithSlow(), func(ctx context.Context) { // Test cloning a single volume multiple times. 
if !dInfo.Capabilities[storageframework.CapPVCDataSource] { e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name) diff --git a/test/e2e/storage/testsuites/snapshottable_stress.go b/test/e2e/storage/testsuites/snapshottable_stress.go index ab5a9da34c1c6..b381e67c481e1 100644 --- a/test/e2e/storage/testsuites/snapshottable_stress.go +++ b/test/e2e/storage/testsuites/snapshottable_stress.go @@ -238,7 +238,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resources") } - ginkgo.It("should support snapshotting of many volumes repeatedly [Slow] [Serial]", func(ctx context.Context) { + f.It("should support snapshotting of many volumes repeatedly", f.WithSlow(), f.WithSerial(), func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) createPodsAndVolumes(ctx) diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 810b2c7b916c9..95bf85b17121f 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -237,7 +237,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte TestBasicSubpath(ctx, f, f.Namespace.Name, l.pod) }) - ginkgo.It("should fail if subpath directory is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { + f.It("should fail if subpath directory is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) @@ -253,7 +253,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodFailSubpath(ctx, f, l.pod, false) }) - ginkgo.It("should fail if subpath file is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { + f.It("should fail if subpath file is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) @@ -264,7 +264,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodFailSubpath(ctx, f, l.pod, false) }) - ginkgo.It("should fail if non-existent subpath is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { + f.It("should fail if non-existent subpath is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) @@ -275,7 +275,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodFailSubpath(ctx, f, l.pod, false) }) - ginkgo.It("should fail if subpath with backstepping is outside the volume [Slow][LinuxOnly]", func(ctx context.Context) { + f.It("should fail if subpath with backstepping is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) @@ -291,7 +291,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodFailSubpath(ctx, f, l.pod, false) }) - ginkgo.It("should support creating multiple subpath from same volumes [Slow]", func(ctx context.Context) { + f.It("should support creating multiple subpath from same volumes", f.WithSlow(), func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) @@ -317,7 +317,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testMultipleReads(ctx, f, l.pod, 0, filepath1, filepath2) }) - ginkgo.It("should support restarting containers using directory as subpath [Slow]", func(ctx context.Context) { + f.It("should support restarting 
containers using directory as subpath", f.WithSlow(), func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) @@ -328,7 +328,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodContainerRestart(ctx, f, l.pod) }) - ginkgo.It("should support restarting containers using file as subpath [Slow][LinuxOnly]", func(ctx context.Context) { + f.It("should support restarting containers using file as subpath", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) @@ -338,7 +338,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testPodContainerRestart(ctx, f, l.pod) }) - ginkgo.It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) { + f.It("should unmount if pod is gracefully deleted while kubelet is down", f.WithDisruptive(), f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() init(ctx) ginkgo.DeferCleanup(cleanup) @@ -351,7 +351,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testSubpathReconstruction(ctx, f, l.hostExec, l.pod, false) }) - ginkgo.It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow][LinuxOnly]", func(ctx context.Context) { + f.It("should unmount if pod is force deleted while kubelet is down", f.WithDisruptive(), f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() init(ctx) ginkgo.DeferCleanup(cleanup) @@ -422,7 +422,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0) }) - ginkgo.It("should verify container cannot write to subpath readonly volumes [Slow]", func(ctx context.Context) { + f.It("should verify container cannot write to subpath readonly volumes", f.WithSlow(), func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) if l.roVolSource == nil { diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index e0d0eb5d8f8c5..dce9baf767cfc 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -139,7 +139,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } - ginkgo.It("should write files of various sizes, verify size, validate content [Slow]", func(ctx context.Context) { + f.It("should write files of various sizes, verify size, validate content", f.WithSlow(), func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) diff --git a/test/e2e/storage/testsuites/volume_stress.go b/test/e2e/storage/testsuites/volume_stress.go index 82cb7e8631de6..fe866b1fc6481 100644 --- a/test/e2e/storage/testsuites/volume_stress.go +++ b/test/e2e/storage/testsuites/volume_stress.go @@ -185,7 +185,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, l.migrationCheck.validateMigrationVolumeOpCounts(ctx) } - ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func(ctx context.Context) { + f.It("multiple pods should access different volumes repeatedly", f.WithSlow(), f.WithSerial(), func(ctx context.Context) { init(ctx) ginkgo.DeferCleanup(cleanup) createPodsAndVolumes(ctx) diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index 12a4bfc0c7af9..53a7b794c5b19 
100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -123,7 +123,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, // And one extra pod with a CSI volume should get Pending with a condition // that says it's unschedulable because of volume limit. // BEWARE: the test may create lot of volumes and it's really slow. - ginkgo.It("should support volume limits [Serial]", func(ctx context.Context) { + f.It("should support volume limits", f.WithSerial(), func(ctx context.Context) { driverInfo := driver.GetDriverInfo() if !driverInfo.Capabilities[storageframework.CapVolumeLimits] { ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name)) diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 28f1f1a71bccc..26ac8630e3722 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -196,7 +196,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa switch pattern.VolType { case storageframework.PreprovisionedPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { - ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func(ctx context.Context) { + f.It("should fail to create pod by failing to mount volume", f.WithSlow(), func(ctx context.Context) { manualInit(ctx) ginkgo.DeferCleanup(cleanup) @@ -257,7 +257,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa case storageframework.DynamicPV: if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { - ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func(ctx context.Context) { + f.It("should fail in binding dynamic provisioned PV to PVC", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) { manualInit(ctx) ginkgo.DeferCleanup(cleanup) @@ -296,7 +296,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType) } - ginkgo.It("should fail to use a volume in a pod with mismatched mode [Slow]", func(ctx context.Context) { + f.It("should fail to use a volume in a pod with mismatched mode", f.WithSlow(), func(ctx context.Context) { skipTestIfBlockNotSupported(driver) init(ctx) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange diff --git a/test/e2e/storage/testsuites/volumeperf.go b/test/e2e/storage/testsuites/volumeperf.go index d152df864cea7..da82e46e9bfc8 100644 --- a/test/e2e/storage/testsuites/volumeperf.go +++ b/test/e2e/storage/testsuites/volumeperf.go @@ -150,7 +150,7 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri } }) - ginkgo.It("should provision volumes at scale within performance constraints [Slow] [Serial]", func(ctx context.Context) { + f.It("should provision volumes at scale within performance constraints", f.WithSlow(), f.WithSerial(), func(ctx context.Context) { l = &local{ cs: f.ClientSet, ns: f.Namespace, diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index 3ff05fc09128c..2ba8b6a9f79a3 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -45,7 +45,7 @@ import ( // This test needs to run in serial because other tests could interfere // with metrics being tested here. 
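All of the storage hunks apply the same mechanical conversion: tags such as [Slow], [Serial], [Disruptive] and [Feature:*] are dropped from the spec text, registration moves from ginkgo.It/ginkgo.Describe to f.It/f.Describe, and the tags are passed as typed arguments instead (f.WithSlow(), framework.WithSerial(), feature.Vsphere, ...), while custom tags such as [LinuxOnly] stay in place as plain strings; suites that carried tags in their TestSuiteInfo name gain a TestTags field for the same purpose. A minimal sketch of the resulting shape, with placeholder suite and spec names (the helpers used are the ones this diff itself introduces):

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = ginkgo.Describe("example suite", func() {
	f := framework.NewDefaultFramework("example")

	// Old: ginkgo.Describe("provisioning [Slow] [Feature:StorageProvider]", func() { ... })
	// New: the framework wrapper attaches typed labels to every spec registered below.
	f.Describe("provisioning", framework.WithSlow(), feature.StorageProvider, func() {

		// Old: ginkgo.It("should do the thing [Serial] [Disruptive] [LinuxOnly]", func(ctx context.Context) { ... })
		// New: framework-known tags become labels; arbitrary custom tags stay as strings.
		f.It("should do the thing", f.WithSerial(), f.WithDisruptive(), "[LinuxOnly]", func(ctx context.Context) {
			// test body unchanged
		})
	})
})

// Storage test suites move their tags the same way, e.g. (from the multiVolume hunk above):
//
//	tsInfo: storageframework.TestSuiteInfo{
//		Name:     "multiVolume",
//		TestTags: []interface{}{framework.WithSlow()},
//		...
//	}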
-var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { +var _ = utils.SIGDescribe(framework.WithSerial(), "Volume metrics", func() { var ( c clientset.Interface ns string @@ -468,7 +468,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { }) // TODO(mauriciopoppe): after CSIMigration is turned on we're no longer reporting // the volume_provision metric (removed in #106609), issue to investigate the bug #106773 - ginkgo.It("should create prometheus metrics for volume provisioning errors [Slow]", func(ctx context.Context) { + f.It("should create prometheus metrics for volume provisioning errors", f.WithSlow(), func(ctx context.Context) { provisioningError(ctx, isEphemeral) }) ginkgo.It("should create volume metrics with the correct FilesystemMode PVC ref", func(ctx context.Context) { diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 9ccd2ea42d4d1..a18bc995d0fcf 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -37,6 +37,7 @@ import ( "k8s.io/apiserver/pkg/authentication/serviceaccount" clientset "k8s.io/client-go/kubernetes" storageutil "k8s.io/kubernetes/pkg/apis/storage/util" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eauth "k8s.io/kubernetes/test/e2e/framework/auth" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -86,7 +87,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { timeouts = f.Timeouts }) - ginkgo.Describe("DynamicProvisioner [Slow] [Feature:StorageProvider]", func() { + f.Describe("DynamicProvisioner", framework.WithSlow(), feature.StorageProvider, func() { ginkgo.It("should provision storage with different parameters", func(ctx context.Context) { // This test checks that dynamic provisioning can provision a volume @@ -459,7 +460,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) ginkgo.Describe("DynamicProvisioner External", func() { - ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func(ctx context.Context) { + f.It("should let an external dynamic provisioner create and delete persistent volumes", f.WithSlow(), func(ctx context.Context) { // external dynamic provisioner pods need additional permissions provided by the // persistent-volume-provisioner clusterrole and a leader-locking role serviceAccountName := "default" @@ -523,7 +524,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) ginkgo.Describe("DynamicProvisioner Default", func() { - ginkgo.It("should create and delete default persistent volumes [Slow]", func(ctx context.Context) { + f.It("should create and delete default persistent volumes", f.WithSlow(), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") e2epv.SkipIfNoDefaultStorageClass(ctx, c) @@ -547,7 +548,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) // Modifying the default storage class can be disruptive to other tests that depend on it - ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func(ctx context.Context) { + f.It("should be disabled by changing the default annotation", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") e2epv.SkipIfNoDefaultStorageClass(ctx, c) @@ -584,7 +585,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) // Modifying the default storage 
class can be disruptive to other tests that depend on it - ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func(ctx context.Context) { + f.It("should be disabled by removing the default annotation", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") e2epv.SkipIfNoDefaultStorageClass(ctx, c) diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go index 2c5d51514ffe6..81d8e1c5c92d8 100644 --- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go +++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -34,7 +35,7 @@ import ( ) // Testing configurations of single a PV/PVC pair attached to a vSphere Disk -var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("PersistentVolumes:vsphere", feature.Vsphere, func() { var ( c clientset.Interface ns string @@ -157,7 +158,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() 2. Restart kubelet 3. Verify that written file is accessible after kubelet restart */ - ginkgo.It("should test that a file written to the vsphere volume mount before kubelet restart can be read after restart [Disruptive]", func(ctx context.Context) { + f.It("should test that a file written to the vsphere volume mount before kubelet restart can be read after restart", f.WithDisruptive(), func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() utils.TestKubeletRestartsAndRestoresMount(ctx, c, f, clientPod, e2epod.VolumeMountPath1) }) @@ -173,7 +174,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func() 4. Start kubelet. 5. Verify that volume mount not to be found. 
*/ - ginkgo.It("should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func(ctx context.Context) { + f.It("should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns", f.WithDisruptive(), func(ctx context.Context) { e2eskipper.SkipUnlessSSHKeyPresent() utils.TestVolumeUnmountsFromDeletedPod(ctx, c, f, clientPod, e2epod.VolumeMountPath1) }) diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index 7755d5424bbd7..5342cf29ccca8 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -26,6 +26,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -35,7 +36,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPolicy]", func() { +var _ = utils.SIGDescribe("PersistentVolumes", feature.Vsphere, feature.ReclaimPolicy, func() { f := framework.NewDefaultFramework("persistentvolumereclaim") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( @@ -53,7 +54,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) }) - ginkgo.Describe("persistentvolumereclaim:vsphere [Feature:vsphere]", func() { + f.Describe("persistentvolumereclaim:vsphere", feature.Vsphere, func() { ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("vsphere") ginkgo.DeferCleanup(testCleanupVSpherePersistentVolumeReclaim, c, nodeInfo, ns, volumePath, pv, pvc) diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index a7c1f33fc122d..2444c57595397 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -24,6 +24,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -48,7 +49,7 @@ Test Steps 8. verify associated pv is also deleted. 9. 
delete pvcVvol */ -var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSelector]", func() { +var _ = utils.SIGDescribe("PersistentVolumes", feature.Vsphere, feature.LabelSelector, func() { f := framework.NewDefaultFramework("pvclabelselector") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( @@ -77,7 +78,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele }) - ginkgo.Describe("Selector-Label Volume Binding:vsphere [Feature:vsphere]", func() { + f.Describe("Selector-Label Volume Binding:vsphere", feature.Vsphere, func() { ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("Running clean up actions") if framework.ProviderIs("vsphere") { diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index 442d4a84447a2..f21fda1ebf116 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -27,6 +27,7 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -56,7 +57,7 @@ type NodeSelector struct { labelValue string } -var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("vcp at scale", feature.Vsphere, func() { f := framework.NewDefaultFramework("vcp-at-scale") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index 6efea9c46826e..9929315f3e55e 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -25,6 +25,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -55,7 +56,7 @@ const ( storageclassname = "nginx-sc" ) -var _ = utils.SIGDescribe("vsphere statefulset [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("vsphere statefulset", feature.Vsphere, func() { f := framework.NewDefaultFramework("vsphere-statefulset") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index 57fd15b52d75d..2083a290eb190 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -27,6 +27,7 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -46,7 +47,7 @@ The following actions will be performed as part of this test. 4. Each instance of routine iterates for n times, where n is read from system env - VCP_STRESS_ITERATIONS 5. Each iteration creates 1 PVC, 1 POD using the provisioned PV, Verify disk is attached to the node, Verify pod can access the volume, delete the pod and finally delete the PVC. 
*/ -var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("vsphere cloud provider stress", feature.Vsphere, func() { f := framework.NewDefaultFramework("vcp-stress") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index 43aebbab2f7e8..cc4ab2fda799d 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -23,6 +23,7 @@ import ( "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -40,7 +41,7 @@ This test reads env 1. CLUSTER_DATASTORE which should be set to clustered datastore 2. VSPHERE_SPBM_POLICY_DS_CLUSTER which should be set to a tag based spbm policy tagged to a clustered datastore */ -var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore", feature.Vsphere, func() { f := framework.NewDefaultFramework("volume-provision") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index 467b88a0e9531..9e82cdc643197 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -28,6 +28,7 @@ import ( clientset "k8s.io/client-go/kubernetes" admissionapi "k8s.io/pod-security-admission/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -50,7 +51,7 @@ const ( 4. Verify the error returned on PVC failure is the correct. */ -var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Provisioning on Datastore", feature.Vsphere, func() { f := framework.NewDefaultFramework("volume-datastore") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 576473fa9adff..4c50b285a8504 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -56,7 +57,7 @@ import ( 11. 
Delete PVC, PV and Storage Class */ -var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Disk Format", feature.Vsphere, func() { f := framework.NewDefaultFramework("volume-disk-format") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged const ( diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index 8b4532fc5f5d6..ab6e301595033 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -47,7 +48,7 @@ const ( 3. Verify the provisioned PV size is correct. */ -var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Disk Size", feature.Vsphere, func() { f := framework.NewDefaultFramework("volume-disksize") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index ef3b151064358..20c4ce43c070b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -26,6 +26,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" @@ -68,7 +69,7 @@ const ( 7. Verify if the MountVolume.MountDevice fails because it is unable to find the file system executable file on the node. */ -var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume FStype", feature.Vsphere, func() { f := framework.NewDefaultFramework("volume-fstype") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index 3313de5e4b344..2bfebd09ba97d 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/cluster/ports" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -100,7 +101,7 @@ For the number of schedulable nodes, 6. Delete the pod and wait for the volume to be detached 7. 
Delete the volume */ -var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disruptive]", func() { +var _ = utils.SIGDescribe("Volume Attach Verify", feature.Vsphere, framework.WithSerial(), framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("restart-master") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go index f1640e1c30807..e601b3afd0b1d 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go @@ -24,6 +24,7 @@ import ( "github.com/vmware/govmomi/object" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -31,7 +32,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive]", func() { +var _ = utils.SIGDescribe("Node Unregister", feature.Vsphere, framework.WithSlow(), framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("node-unregister") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index 22cc7e2fae2a9..295b5d7787520 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -45,7 +46,7 @@ Test to verify volume status after node power off: 1. Verify the pod got provisioned on a different node with volume attached to it 2. Verify the volume is detached from the powered off node */ -var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", func() { +var _ = utils.SIGDescribe("Node Poweroff", feature.Vsphere, framework.WithSlow(), framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("node-poweroff") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index 7a12259371b96..637c58014317f 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -28,6 +28,7 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epv "k8s.io/kubernetes/test/e2e/framework/pv" @@ -52,7 +53,7 @@ import ( 10. Delete storage class. 
*/ -var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Operations Storm", feature.Vsphere, func() { f := framework.NewDefaultFramework("volume-ops-storm") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged const defaultVolumeOpsScale = 30 diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index 72ebea37c5a55..0d71117edd094 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -27,6 +27,7 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -55,7 +56,7 @@ const ( DeleteOp = "DeleteOp" ) -var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("vcp-performance", feature.Vsphere, func() { f := framework.NewDefaultFramework("vcp-performance") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index 2de65f694a16c..1729b50701ef0 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -28,6 +28,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -37,7 +38,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Volume Placement", feature.Vsphere, func() { f := framework.NewDefaultFramework("volume-placement") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged const ( diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index 6015675a39838..5b0737e8cffbc 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -52,7 +53,7 @@ For the number of schedulable nodes: 9. Delete the Pod and wait for the Volume to be detached. 10. Delete the Volume. 
*/ -var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vsphere][Serial][Disruptive]", func() { +var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart", feature.Vsphere, framework.WithSerial(), framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("restart-vpxd") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 8fa89fc92da16..b2ad094861dda 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -30,6 +30,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -75,7 +76,7 @@ const ( */ -var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning", feature.Vsphere, func() { f := framework.NewDefaultFramework("volume-vsan-policy") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index eab143afff44c..f990baa54559d 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" volumeevents "k8s.io/kubernetes/pkg/controller/volume/events" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -86,7 +87,7 @@ import ( 5. Tests to verify dynamic pv creation using availability zones work across different datacenters in the same VC. 
*/ -var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() { +var _ = utils.SIGDescribe("Zone Support", feature.Vsphere, func() { f := framework.NewDefaultFramework("zone-support") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e/windows/cpu_limits.go b/test/e2e/windows/cpu_limits.go index bc799af3d04dc..15a7ac8f69b88 100644 --- a/test/e2e/windows/cpu_limits.go +++ b/test/e2e/windows/cpu_limits.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -35,7 +36,7 @@ import ( "github.com/onsi/gomega" ) -var _ = sigDescribe("[Feature:Windows] Cpu Resources [Serial]", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "Cpu Resources", framework.WithSerial(), skipUnlessWindows(func() { f := framework.NewDefaultFramework("cpu-resources-test-windows") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go index ceb98cbf5c330..ed2996811043c 100644 --- a/test/e2e/windows/density.go +++ b/test/e2e/windows/density.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -40,7 +41,7 @@ import ( "github.com/onsi/gomega" ) -var _ = sigDescribe("[Feature:Windows] Density [Serial] [Slow]", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "Density", framework.WithSerial(), framework.WithSlow(), skipUnlessWindows(func() { f := framework.NewDefaultFramework("density-test-windows") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/windows/device_plugin.go b/test/e2e/windows/device_plugin.go index 416e1969a747a..a96abcd7eb5cb 100644 --- a/test/e2e/windows/device_plugin.go +++ b/test/e2e/windows/device_plugin.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2edaemonset "k8s.io/kubernetes/test/e2e/framework/daemonset" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -39,7 +40,7 @@ const ( testSlowMultiplier = 60 ) -var _ = sigDescribe("[Feature:GPUDevicePlugin] Device Plugin", skipUnlessWindows(func() { +var _ = sigDescribe(feature.GPUDevicePlugin, "Device Plugin", skipUnlessWindows(func() { f := framework.NewDefaultFramework("device-plugin") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/windows/dns.go b/test/e2e/windows/dns.go index a9de772307670..cbe1f8f612c5c 100644 --- a/test/e2e/windows/dns.go +++ b/test/e2e/windows/dns.go @@ -22,6 +22,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -31,7 +32,7 @@ import ( "github.com/onsi/gomega" ) -var _ = sigDescribe("[Feature:Windows] DNS", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "DNS", 
skipUnlessWindows(func() { ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessNodeOSDistroIs("windows") diff --git a/test/e2e/windows/gmsa_full.go b/test/e2e/windows/gmsa_full.go index 2a3f94a98d1c4..eacd693e16d38 100644 --- a/test/e2e/windows/gmsa_full.go +++ b/test/e2e/windows/gmsa_full.go @@ -56,6 +56,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -90,7 +91,7 @@ const ( gmsaSharedFolder = "write_test" ) -var _ = sigDescribe("[Feature:Windows] GMSA Full [Serial] [Slow]", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framework.WithSlow(), skipUnlessWindows(func() { f := framework.NewDefaultFramework("gmsa-full-test-windows") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/windows/gmsa_kubelet.go b/test/e2e/windows/gmsa_kubelet.go index a7d041fcd977f..7dea9594d8c3e 100644 --- a/test/e2e/windows/gmsa_kubelet.go +++ b/test/e2e/windows/gmsa_kubelet.go @@ -29,6 +29,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -39,7 +40,7 @@ import ( "github.com/onsi/gomega" ) -var _ = sigDescribe("[Feature:Windows] GMSA Kubelet [Slow]", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "GMSA Kubelet", framework.WithSlow(), skipUnlessWindows(func() { f := framework.NewDefaultFramework("gmsa-kubelet-test-windows") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/windows/host_process.go b/test/e2e/windows/host_process.go index d9562ec56198a..1d794313ba4a8 100644 --- a/test/e2e/windows/host_process.go +++ b/test/e2e/windows/host_process.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" @@ -85,7 +86,7 @@ var ( User_NTAuthoritySystem = "NT AUTHORITY\\SYSTEM" ) -var _ = sigDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers", skipUnlessWindows(func() { +var _ = sigDescribe(feature.WindowsHostProcessContainers, "[MinimumKubeletVersion:1.22] HostProcess containers", skipUnlessWindows(func() { ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessNodeOSDistroIs("windows") }) diff --git a/test/e2e/windows/hybrid_network.go b/test/e2e/windows/hybrid_network.go index e2d05a3ec3ce4..e802a30172e77 100644 --- a/test/e2e/windows/hybrid_network.go +++ b/test/e2e/windows/hybrid_network.go @@ -23,6 +23,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -76,7 +77,7 @@ var _ = sigDescribe("Hybrid cluster network", skipUnlessWindows(func() { }) - ginkgo.It("should provide Internet connection for Linux containers using DNS 
[Feature:Networking-DNS]", func(ctx context.Context) { + f.It("should provide Internet connection for Linux containers using DNS", feature.NetworkingDNS, func(ctx context.Context) { linuxPod := createTestPod(f, linuxBusyBoxImage, linuxOS) ginkgo.By("creating a linux pod and waiting for it to be running") linuxPod = e2epod.NewPodClient(f).CreateSync(ctx, linuxPod) @@ -87,7 +88,7 @@ var _ = sigDescribe("Hybrid cluster network", skipUnlessWindows(func() { assertConsistentConnectivity(ctx, f, linuxPod.ObjectMeta.Name, linuxOS, linuxCheck("8.8.8.8", 53)) }) - ginkgo.It("should provide Internet connection for Windows containers using DNS [Feature:Networking-DNS]", func(ctx context.Context) { + f.It("should provide Internet connection for Windows containers using DNS", feature.NetworkingDNS, func(ctx context.Context) { windowsPod := createTestPod(f, windowsBusyBoximage, windowsOS) ginkgo.By("creating a windows pod and waiting for it to be running") windowsPod = e2epod.NewPodClient(f).CreateSync(ctx, windowsPod) diff --git a/test/e2e/windows/hyperv.go b/test/e2e/windows/hyperv.go index edeccc5feace3..f066e24dedbcf 100644 --- a/test/e2e/windows/hyperv.go +++ b/test/e2e/windows/hyperv.go @@ -24,6 +24,7 @@ import ( "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -35,7 +36,7 @@ var ( WindowsHyperVContainerRuntimeClass = "runhcs-wcow-hypervisor" ) -var _ = sigDescribe("[Feature:WindowsHyperVContainers] HyperV containers", skipUnlessWindows(func() { +var _ = sigDescribe(feature.WindowsHyperVContainers, "HyperV containers", skipUnlessWindows(func() { ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessNodeOSDistroIs("windows") }) diff --git a/test/e2e/windows/kubelet_stats.go b/test/e2e/windows/kubelet_stats.go index 1acdbc14b7744..3570bb01daf58 100644 --- a/test/e2e/windows/kubelet_stats.go +++ b/test/e2e/windows/kubelet_stats.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -37,7 +38,7 @@ import ( "github.com/onsi/gomega" ) -var _ = sigDescribe("[Feature:Windows] Kubelet-Stats [Serial]", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "Kubelet-Stats", framework.WithSerial(), skipUnlessWindows(func() { f := framework.NewDefaultFramework("kubelet-stats-test-windows-serial") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -115,7 +116,7 @@ var _ = sigDescribe("[Feature:Windows] Kubelet-Stats [Serial]", skipUnlessWindow }) })) -var _ = sigDescribe("[Feature:Windows] Kubelet-Stats", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "Kubelet-Stats", skipUnlessWindows(func() { f := framework.NewDefaultFramework("kubelet-stats-test-windows") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/windows/memory_limits.go b/test/e2e/windows/memory_limits.go index d108e2a495b52..de978ea26ad85 100644 --- a/test/e2e/windows/memory_limits.go +++ b/test/e2e/windows/memory_limits.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/uuid" + 
"k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" imageutils "k8s.io/kubernetes/test/utils/image" @@ -39,7 +40,7 @@ import ( "github.com/onsi/gomega" ) -var _ = sigDescribe("[Feature:Windows] Memory Limits [Serial] [Slow]", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "Memory Limits", framework.WithSerial(), framework.WithSlow(), skipUnlessWindows(func() { f := framework.NewDefaultFramework("memory-limit-test-windows") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/windows/reboot_node.go b/test/e2e/windows/reboot_node.go index a1b7790f60ff6..9798e3b11d7b9 100644 --- a/test/e2e/windows/reboot_node.go +++ b/test/e2e/windows/reboot_node.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -34,7 +35,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = sigDescribe("[Feature:Windows] [Excluded:WindowsDocker] [MinimumKubeletVersion:1.22] RebootHost containers [Serial] [Disruptive] [Slow]", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "[Excluded:WindowsDocker] [MinimumKubeletVersion:1.22] RebootHost containers", framework.WithSerial(), framework.WithDisruptive(), framework.WithSlow(), skipUnlessWindows(func() { ginkgo.BeforeEach(func() { e2eskipper.SkipUnlessNodeOSDistroIs("windows") }) diff --git a/test/e2e/windows/security_context.go b/test/e2e/windows/security_context.go index b7049e69559d0..06501a857ed7b 100644 --- a/test/e2e/windows/security_context.go +++ b/test/e2e/windows/security_context.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/kubelet/events" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" @@ -40,7 +41,7 @@ import ( const runAsUserNameContainerName = "run-as-username-container" -var _ = sigDescribe("[Feature:Windows] SecurityContext", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "SecurityContext", skipUnlessWindows(func() { f := framework.NewDefaultFramework("windows-run-as-username") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/windows/volumes.go b/test/e2e/windows/volumes.go index 31fb6918cb432..87cb54404d993 100644 --- a/test/e2e/windows/volumes.go +++ b/test/e2e/windows/volumes.go @@ -23,6 +23,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -44,7 +45,7 @@ var ( image = imageutils.GetE2EImage(imageutils.Pause) ) -var _ = sigDescribe("[Feature:Windows] Windows volume mounts", skipUnlessWindows(func() { +var _ = sigDescribe(feature.Windows, "Windows volume mounts", skipUnlessWindows(func() { f := framework.NewDefaultFramework("windows-volumes") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 12eb320c7458c..8340a35e5e3d2 100644 --- 
a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -38,8 +38,10 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/kubelet/kuberuntime" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" @@ -47,7 +49,7 @@ import ( "github.com/opencontainers/runc/libcontainer/apparmor" ) -var _ = SIGDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() { +var _ = SIGDescribe("AppArmor", feature.AppArmor, nodefeature.AppArmor, func() { if isAppArmorEnabled() { ginkgo.BeforeEach(func() { ginkgo.By("Loading AppArmor profiles for testing") diff --git a/test/e2e_node/checkpoint_container.go b/test/e2e_node/checkpoint_container.go index 839ff6e2e4b4b..93bb770d28c90 100644 --- a/test/e2e_node/checkpoint_container.go +++ b/test/e2e_node/checkpoint_container.go @@ -35,6 +35,7 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -74,7 +75,7 @@ func proxyPostRequest(ctx context.Context, c clientset.Interface, node, endpoint } } -var _ = SIGDescribe("Checkpoint Container [NodeFeature:CheckpointContainer]", func() { +var _ = SIGDescribe("Checkpoint Container", nodefeature.CheckpointContainer, func() { f := framework.NewDefaultFramework("checkpoint-container-test") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline ginkgo.It("will checkpoint a container out of a pod", func(ctx context.Context) { diff --git a/test/e2e_node/container_lifecycle_test.go b/test/e2e_node/container_lifecycle_test.go index dcab6bf44727b..ff0366c45c0e7 100644 --- a/test/e2e_node/container_lifecycle_test.go +++ b/test/e2e_node/container_lifecycle_test.go @@ -44,7 +44,7 @@ func prefixedName(namePrefix string, name string) string { return fmt.Sprintf("%s-%s", namePrefix, name) } -var _ = SIGDescribe("[NodeConformance] Containers Lifecycle", func() { +var _ = SIGDescribe(framework.WithNodeConformance(), "Containers Lifecycle", func() { f := framework.NewDefaultFramework("containers-lifecycle-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -559,7 +559,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle", func() { // reduce test flakes. 
bufferSeconds := int64(30) - ginkgo.It("should respect termination grace period seconds [NodeConformance]", func() { + f.It("should respect termination grace period seconds", f.WithNodeConformance(), func() { client := e2epod.NewPodClient(f) gracePeriod := int64(30) @@ -580,7 +580,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle", func() { framework.ExpectNoError(err) }) - ginkgo.It("should respect termination grace period seconds with long-running preStop hook [NodeConformance]", func() { + f.It("should respect termination grace period seconds with long-running preStop hook", f.WithNodeConformance(), func() { client := e2epod.NewPodClient(f) gracePeriod := int64(30) @@ -747,7 +747,7 @@ var _ = SIGDescribe("[NodeConformance] Containers Lifecycle", func() { }) }) -var _ = SIGDescribe("[Serial] Containers Lifecycle", func() { +var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() { f := framework.NewDefaultFramework("containers-lifecycle-test-serial") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged @@ -2961,7 +2961,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle", }) }) -var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Serial] Containers Lifecycle", func() { +var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers]", framework.WithSerial(), "Containers Lifecycle", func() { f := framework.NewDefaultFramework("containers-lifecycle-test-serial") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/container_log_rotation_test.go b/test/e2e_node/container_log_rotation_test.go index ece2d4f7e301d..5f83247ed0a1f 100644 --- a/test/e2e_node/container_log_rotation_test.go +++ b/test/e2e_node/container_log_rotation_test.go @@ -41,7 +41,7 @@ const ( rotationConsistentlyTimeout = 2 * time.Minute ) -var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() { +var _ = SIGDescribe("ContainerLogRotation", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("container-log-rotation-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.Context("when a container generates a lot of log", func() { diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index c5f75761fc4c8..f280aaae958bd 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -36,6 +36,7 @@ import ( runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -77,10 +78,10 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect return nil } -var _ = SIGDescribe("Container Manager Misc [Serial]", func() { +var _ = SIGDescribe("Container Manager Misc", framework.WithSerial(), func() { f := framework.NewDefaultFramework("kubelet-container-manager") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - ginkgo.Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() { + f.Describe("Validate OOM score adjustments", nodefeature.OOMScoreAdj, func() { ginkgo.Context("once the node is setup", func() { ginkgo.It("container runtime's oom-score-adj should be -999", func(ctx context.Context) { runtimePids, err := 
getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) diff --git a/test/e2e_node/cpu_manager_metrics_test.go b/test/e2e_node/cpu_manager_metrics_test.go index 8a093f6c17213..7a760afb21f29 100644 --- a/test/e2e_node/cpu_manager_metrics_test.go +++ b/test/e2e_node/cpu_manager_metrics_test.go @@ -31,6 +31,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -39,7 +40,7 @@ import ( "k8s.io/utils/cpuset" ) -var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() { +var _ = SIGDescribe("CPU Manager Metrics", framework.WithSerial(), feature.CPUManager, func() { f := framework.NewDefaultFramework("cpumanager-metrics") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 80ffa35d265fa..7752e5f3ea12b 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -37,6 +37,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -880,7 +881,7 @@ func isSMTAlignmentError(pod *v1.Pod) bool { } // Serial because the test updates kubelet configuration. -var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager]", func() { +var _ = SIGDescribe("CPU Manager", framework.WithSerial(), feature.CPUManager, func() { f := framework.NewDefaultFramework("cpu-manager-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index fc55e85f98b56..493248bef137a 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -29,6 +29,7 @@ import ( kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -43,7 +44,7 @@ const ( bestEffortPodName = "best-effort" ) -var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() { +var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisruptive(), nodefeature.CriticalPod, func() { f := framework.NewDefaultFramework("critical-pod-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.Context("when we need to admit a critical pod", func() { @@ -90,7 +91,7 @@ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod] } }) - ginkgo.It("should add DisruptionTarget condition to the preempted pod [NodeFeature:PodDisruptionConditions]", func(ctx context.Context) { + f.It("should add DisruptionTarget condition to the preempted pod", nodefeature.PodDisruptionConditions, func(ctx context.Context) { // because adminssion Priority enable, If the priority class is not found, the Pod is rejected. 
node := getNodeName(ctx, f) nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{ diff --git a/test/e2e_node/deleted_pods_test.go b/test/e2e_node/deleted_pods_test.go index a30a75352034a..85cc049782db8 100644 --- a/test/e2e_node/deleted_pods_test.go +++ b/test/e2e_node/deleted_pods_test.go @@ -38,7 +38,7 @@ const ( testFinalizer = "example.com/test-finalizer" ) -var _ = SIGDescribe("Deleted pods handling [NodeConformance]", func() { +var _ = SIGDescribe("Deleted pods handling", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("deleted-pods-test") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index 8f7efffb7b690..57e7fc2ee011d 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -53,7 +53,7 @@ const ( kubeletAddr = "localhost:10255" ) -var _ = SIGDescribe("Density [Serial] [Slow]", func() { +var _ = SIGDescribe("Density", framework.WithSerial(), framework.WithSlow(), func() { const ( // The data collection time of resource collector and the standalone cadvisor // is not synchronized, so resource collector may miss data or diff --git a/test/e2e_node/device_manager_test.go b/test/e2e_node/device_manager_test.go index 55d6b7f979c81..c290a02d7bd2b 100644 --- a/test/e2e_node/device_manager_test.go +++ b/test/e2e_node/device_manager_test.go @@ -40,11 +40,13 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util" admissionapi "k8s.io/pod-security-admission/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" + "k8s.io/kubernetes/test/e2e/nodefeature" testutils "k8s.io/kubernetes/test/utils" "github.com/onsi/ginkgo/v2" @@ -59,7 +61,7 @@ const ( ) // Serial because the test updates kubelet configuration. -var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeature:DeviceManager]", func() { +var _ = SIGDescribe("Device Manager", framework.WithSerial(), feature.DeviceManager, nodefeature.DeviceManager, func() { checkpointFullPath := filepath.Join(devicePluginDir, checkpointName) f := framework.NewDefaultFramework("devicemanager-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -319,7 +321,7 @@ var _ = SIGDescribe("Device Manager [Serial] [Feature:DeviceManager][NodeFeatur 12. Delete the sample device plugin pod. 13. 
Remove `/var/lib/kubelet/device-plugins/sample/` and its content, the directory created to control registration */ - ginkgo.Context("With sample device plugin [Serial] [Disruptive]", func() { + f.Context("With sample device plugin", f.WithSerial(), f.WithDisruptive(), func() { var deviceCount int = 2 var devicePluginPod *v1.Pod var triggerPathFile, triggerPathDir string diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 7563e053c357f..ed4418744ef3c 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -49,11 +49,13 @@ import ( kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1" kubeletpodresourcesv1alpha1 "k8s.io/kubelet/pkg/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles" + "k8s.io/kubernetes/test/e2e/nodefeature" ) var ( @@ -62,7 +64,7 @@ var ( ) // Serial because the test restarts Kubelet -var _ = SIGDescribe("Device Plugin [Feature:DevicePluginProbe][NodeFeature:DevicePluginProbe][Serial]", func() { +var _ = SIGDescribe("Device Plugin", feature.DevicePluginProbe, nodefeature.DevicePluginProbe, framework.WithSerial(), func() { f := framework.NewDefaultFramework("device-plugin-errors") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged testDevicePlugin(f, kubeletdevicepluginv1beta1.DevicePluginPath) @@ -93,7 +95,7 @@ const ( func testDevicePlugin(f *framework.Framework, pluginSockDir string) { pluginSockDir = filepath.Join(pluginSockDir) + "/" - ginkgo.Context("DevicePlugin [Serial] [Disruptive]", func() { + f.Context("DevicePlugin", f.WithSerial(), f.WithDisruptive(), func() { var devicePluginPod, dptemplate *v1.Pod var v1alphaPodResources *kubeletpodresourcesv1alpha1.ListPodResourcesResponse var v1PodResources *kubeletpodresourcesv1.ListPodResourcesResponse @@ -705,7 +707,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { } func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) { - ginkgo.Context("DevicePlugin [Serial] [Disruptive]", func() { + f.Context("DevicePlugin", f.WithSerial(), f.WithDisruptive(), func() { var devicePluginPod *v1.Pod var v1PodResources *kubeletpodresourcesv1.ListPodResourcesResponse var triggerPathFile, triggerPathDir string diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go index 22e7221ea75fe..5344a05753e9a 100644 --- a/test/e2e_node/dra_test.go +++ b/test/e2e_node/dra_test.go @@ -18,7 +18,7 @@ limitations under the License. 
E2E Node test for DRA (Dynamic Resource Allocation) This test covers node-specific aspects of DRA The test can be run locally on Linux this way: - make test-e2e-node FOCUS='\[NodeFeature:DynamicResourceAllocation\]' SKIP='\[Flaky\]' PARALLELISM=1 \ + make test-e2e-node FOCUS='\[NodeAlphaFeature:DynamicResourceAllocation\]' SKIP='\[Flaky\]' PARALLELISM=1 \ TEST_ARGS='--feature-gates="DynamicResourceAllocation=true" --service-feature-gates="DynamicResourceAllocation=true" --runtime-config=api/all=true' */ @@ -42,6 +42,7 @@ import ( dra "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin" admissionapi "k8s.io/pod-security-admission/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -59,13 +60,13 @@ const ( podInPendingStateTimeout = time.Second * 60 // how long to wait for a pod to stay in pending state ) -var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation][NodeAlphaFeature:DynamicResourceAllocation]", func() { +var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, "[NodeAlphaFeature:DynamicResourceAllocation]", func() { f := framework.NewDefaultFramework("dra-node") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline var kubeletPlugin *testdriver.ExamplePlugin - ginkgo.Context("Resource Kubelet Plugin [Serial]", func() { + f.Context("Resource Kubelet Plugin", f.WithSerial(), func() { ginkgo.BeforeEach(func(ctx context.Context) { kubeletPlugin = newKubeletPlugin(getNodeName(ctx, f)) }) diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 2b5a7d553bee6..d4d1149610410 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -37,9 +37,11 @@ import ( evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -69,7 +71,7 @@ const ( // InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods. // Node disk pressure is induced by consuming all inodes on the node. 
-var _ = SIGDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { f := framework.NewDefaultFramework("inode-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged expectedNodeCondition := v1.NodeDiskPressure @@ -106,7 +108,7 @@ var _ = SIGDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeFeature:Evic // ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images // Disk pressure is induced by pulling large images -var _ = SIGDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { f := framework.NewDefaultFramework("image-gc-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 10 * time.Minute @@ -137,7 +139,7 @@ var _ = SIGDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature: // MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods. // Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved. -var _ = SIGDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("MemoryAllocatableEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { f := framework.NewDefaultFramework("memory-allocatable-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged expectedNodeCondition := v1.NodeMemoryPressure @@ -171,7 +173,7 @@ var _ = SIGDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][Node // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // Disk pressure is induced by running pods which consume disk space. -var _ = SIGDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { f := framework.NewDefaultFramework("localstorage-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 15 * time.Minute @@ -210,7 +212,7 @@ var _ = SIGDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeatu // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold. // Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run. 
-var _ = SIGDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { f := framework.NewDefaultFramework("localstorage-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 10 * time.Minute @@ -249,7 +251,7 @@ var _ = SIGDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeF // This test validates that in-memory EmptyDir's are evicted when the Kubelet does // not have Sized Memory Volumes enabled. When Sized volumes are enabled, it's // not possible to exhaust the quota. -var _ = SIGDescribe("LocalStorageCapacityIsolationMemoryBackedVolumeEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("LocalStorageCapacityIsolationMemoryBackedVolumeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolation, nodefeature.Eviction, func() { f := framework.NewDefaultFramework("localstorage-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged evictionTestTimeout := 7 * time.Minute @@ -292,7 +294,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationMemoryBackedVolumeEviction [Sl }) // LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions -var _ = SIGDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolation, nodefeature.Eviction, func() { f := framework.NewDefaultFramework("localstorage-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged evictionTestTimeout := 10 * time.Minute @@ -345,7 +347,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disr // PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods. // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // the higher priority pod. -var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("PriorityMemoryEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged expectedNodeCondition := v1.NodeMemoryPressure @@ -405,7 +407,7 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive] // PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods. // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // the higher priority pod. 
-var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged expectedNodeCondition := v1.NodeDiskPressure @@ -464,7 +466,7 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disru }) // PriorityPidEvictionOrdering tests that the node emits pid pressure in response to a fork bomb, and evicts pods by priority -var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() { +var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { f := framework.NewDefaultFramework("pidpressure-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 10 * time.Minute @@ -511,7 +513,7 @@ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][No runEvictionTest(f, pressureTimeout, expectedNodeCondition, expectedStarvedResource, logPidMetrics, specs) }) - ginkgo.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; PodDisruptionConditions enabled [NodeFeature:PodDisruptionConditions]", func() { + f.Context(fmt.Sprintf(testContextFmt, expectedNodeCondition)+"; PodDisruptionConditions enabled", nodefeature.PodDisruptionConditions, func() { tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { pidsConsumed := int64(10000) summary := eventuallyGetSummary(ctx) diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index a89381571bd97..7f7dc46f1e211 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/kubelet/pkg/types" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" @@ -72,7 +73,7 @@ type testRun struct { // GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here: // http://kubernetes.io/docs/admin/garbage-collection/ -var _ = SIGDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func() { +var _ = SIGDescribe("GarbageCollect", framework.WithSerial(), nodefeature.GarbageCollect, func() { f := framework.NewDefaultFramework("garbage-collect-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged containerNamePrefix := "gc-test-container-" diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go index c57f301d5f3da..55c4b2c770248 100644 --- a/test/e2e_node/hugepages_test.go +++ b/test/e2e_node/hugepages_test.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/cm" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -200,7 +201,7 @@ func getHugepagesTestPod(f *framework.Framework, limits v1.ResourceList, mounts } // Serial because the test updates kubelet configuration. 
-var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:HugePages]", func() { +var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, "[NodeSpecialFeature:HugePages]", func() { f := framework.NewDefaultFramework("hugepages-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index eaa617bd1c9ff..94581b375380b 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -24,13 +24,14 @@ import ( "k8s.io/apimachinery/pkg/util/dump" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) -var _ = SIGDescribe("ImageID [NodeFeature: ImageID]", func() { +var _ = SIGDescribe("ImageID", nodefeature.ImageID, func() { busyBoxImage := "registry.k8s.io/e2e-test-images/busybox@sha256:a9155b13325b2abef48e71de77bb8ac015412a566829f621d06bfae5c699b1b9" diff --git a/test/e2e_node/lock_contention_linux_test.go b/test/e2e_node/lock_contention_linux_test.go index 50434f861327d..dd808585a76b6 100644 --- a/test/e2e_node/lock_contention_linux_test.go +++ b/test/e2e_node/lock_contention_linux_test.go @@ -36,7 +36,7 @@ const contentionLockFile = "/var/run/kubelet.lock" // Disruptive because the kubelet is restarted in the test. // NodeSpecialFeature:LockContention because we don't want the test to be picked up by any other // test suite, hence the unique name "LockContention". -var _ = SIGDescribe("Lock contention [Slow] [Disruptive] [NodeSpecialFeature:LockContention]", func() { +var _ = SIGDescribe("Lock contention", framework.WithSlow(), framework.WithDisruptive(), "[NodeSpecialFeature:LockContention]", func() { // Requires `--lock-file` & `--exit-on-lock-contention` flags to be set on the Kubelet. ginkgo.It("Kubelet should stop when the test acquires the lock on lock file and restart once the lock is released", func(ctx context.Context) { diff --git a/test/e2e_node/log_path_test.go b/test/e2e_node/log_path_test.go index 68f2cf544aa37..a1589e6d91ecc 100644 --- a/test/e2e_node/log_path_test.go +++ b/test/e2e_node/log_path_test.go @@ -36,7 +36,7 @@ const ( logContainerName = "logger" ) -var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() { +var _ = SIGDescribe("ContainerLogPath", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("kubelet-container-log-path") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var podClient *e2epod.PodClient diff --git a/test/e2e_node/memory_manager_test.go b/test/e2e_node/memory_manager_test.go index ac91ca0a2f95a..7c9e6b1c3587a 100644 --- a/test/e2e_node/memory_manager_test.go +++ b/test/e2e_node/memory_manager_test.go @@ -39,6 +39,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/apis/podresources" "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state" "k8s.io/kubernetes/pkg/kubelet/util" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" admissionapi "k8s.io/pod-security-admission/api" @@ -241,7 +242,7 @@ func getAllNUMANodes() []int { } // Serial because the test updates kubelet configuration. 
-var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager]", func() { +var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.WithSerial(), feature.MemoryManager, func() { // TODO: add more complex tests that will include interaction between CPUManager, MemoryManager and TopologyManager var ( allNUMANodes []int diff --git a/test/e2e_node/mirror_pod_grace_period_test.go b/test/e2e_node/mirror_pod_grace_period_test.go index fecb199441eed..3aae29268ef9a 100644 --- a/test/e2e_node/mirror_pod_grace_period_test.go +++ b/test/e2e_node/mirror_pod_grace_period_test.go @@ -60,7 +60,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { }, 2*time.Minute, time.Second*4).Should(gomega.BeNil()) }) - ginkgo.It("mirror pod termination should satisfy grace period when static pod is deleted [NodeConformance]", func(ctx context.Context) { + f.It("mirror pod termination should satisfy grace period when static pod is deleted", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -78,7 +78,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { }, 19*time.Second, 200*time.Millisecond).Should(gomega.BeNil()) }) - ginkgo.It("mirror pod termination should satisfy grace period when static pod is updated [NodeConformance]", func(ctx context.Context) { + f.It("mirror pod termination should satisfy grace period when static pod is updated", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -106,7 +106,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { gomega.Expect(pod.Spec.Containers[0].Image).To(gomega.Equal(image)) }) - ginkgo.It("should update a static pod when the static pod is updated multiple times during the graceful termination period [NodeConformance]", func(ctx context.Context) { + f.It("should update a static pod when the static pod is updated multiple times during the graceful termination period", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -135,7 +135,7 @@ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() { gomega.Expect(pod.Spec.Containers[0].Image).To(gomega.Equal(image)) }) - ginkgo.Context("and the container runtime is temporarily down during pod termination [NodeConformance] [Serial] [Disruptive]", func() { + f.Context("and the container runtime is temporarily down during pod termination", f.WithNodeConformance(), f.WithSerial(), f.WithDisruptive(), func() { ginkgo.BeforeEach(func(ctx context.Context) { // Ensure that prior to the test starting, no other pods are running or in the process of being terminated other than the mirror pod. // This is necessary as the test verifies metrics that assume that there is only one pod (the static pod) being run, and all other pods have been terminated. diff --git a/test/e2e_node/mirror_pod_test.go b/test/e2e_node/mirror_pod_test.go index e4358fe4de386..5c9d8737d2729 100644 --- a/test/e2e_node/mirror_pod_test.go +++ b/test/e2e_node/mirror_pod_test.go @@ -70,7 +70,7 @@ var _ = SIGDescribe("MirrorPod", func() { Testname: Mirror Pod, update Description: Updating a static Pod MUST recreate an updated mirror Pod. 
Create a static pod, verify that a mirror pod is created. Update the static pod by changing the container image, the mirror pod MUST be re-created and updated with the new image. */ - ginkgo.It("should be updated when static pod updated [NodeConformance]", func(ctx context.Context) { + f.It("should be updated when static pod updated", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -97,7 +97,7 @@ var _ = SIGDescribe("MirrorPod", func() { Testname: Mirror Pod, delete Description: When a mirror-Pod is deleted then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod, the mirror pod MUST be re-created and running. */ - ginkgo.It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func(ctx context.Context) { + f.It("should be recreated when mirror pod gracefully deleted", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -117,7 +117,7 @@ var _ = SIGDescribe("MirrorPod", func() { Testname: Mirror Pod, force delete Description: When a mirror-Pod is deleted, forcibly, then the mirror pod MUST be re-created. Create a static pod, verify that a mirror pod is created. Delete the mirror pod with delete wait time set to zero forcing immediate deletion, the mirror pod MUST be re-created and running. */ - ginkgo.It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func(ctx context.Context) { + f.It("should be recreated when mirror pod forcibly deleted", f.WithNodeConformance(), func(ctx context.Context) { ginkgo.By("get mirror pod uid") pod, err := f.ClientSet.CoreV1().Pods(ns).Get(ctx, mirrorPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -152,7 +152,7 @@ var _ = SIGDescribe("MirrorPod", func() { Testname: Mirror Pod, recreate Description: When a static pod's manifest is removed and readded, the mirror pod MUST successfully recreate. Create the static pod, verify it is running, remove its manifest and then add it back, and verify the static pod runs again. 
*/ - ginkgo.It("should successfully recreate when file is removed and recreated [NodeConformance]", func(ctx context.Context) { + f.It("should successfully recreate when file is removed and recreated", f.WithNodeConformance(), func(ctx context.Context) { ns = f.Namespace.Name staticPodName = "static-pod-" + string(uuid.NewUUID()) mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName @@ -199,7 +199,7 @@ var _ = SIGDescribe("MirrorPod", func() { }) ginkgo.Context("when recreating a static pod", func() { var ns, podPath, staticPodName, mirrorPodName string - ginkgo.It("it should launch successfully even if it temporarily failed termination due to volume failing to unmount [NodeConformance] [Serial]", func(ctx context.Context) { + f.It("it should launch successfully even if it temporarily failed termination due to volume failing to unmount", f.WithNodeConformance(), f.WithSerial(), func(ctx context.Context) { node := getNodeName(ctx, f) ns = f.Namespace.Name c := f.ClientSet diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index ae57fa75e3025..1e02dc6b32a40 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -38,6 +38,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig" "github.com/onsi/ginkgo/v2" @@ -63,10 +64,10 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration) initialConfig.SystemReservedCgroup = systemReservedCgroup } -var _ = SIGDescribe("Node Container Manager [Serial]", func() { +var _ = SIGDescribe("Node Container Manager", framework.WithSerial(), func() { f := framework.NewDefaultFramework("node-container-manager") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - ginkgo.Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() { + f.Describe("Validate Node Allocatable", nodefeature.NodeAllocatable, func() { ginkgo.It("sets up the node and runs the test", func(ctx context.Context) { framework.ExpectNoError(runTest(ctx, f)) }) diff --git a/test/e2e_node/node_perf_test.go b/test/e2e_node/node_perf_test.go index e37a9ef9f3e26..251255bbefe22 100644 --- a/test/e2e_node/node_perf_test.go +++ b/test/e2e_node/node_perf_test.go @@ -80,7 +80,7 @@ func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletc // Serial because the test updates kubelet configuration. // Slow by design. 
-var _ = SIGDescribe("Node Performance Testing [Serial] [Slow]", func() { +var _ = SIGDescribe("Node Performance Testing", framework.WithSerial(), framework.WithSlow(), func() { f := framework.NewDefaultFramework("node-performance-testing") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var ( diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index f346b8edad232..3c264af9d0654 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -40,10 +40,11 @@ import ( "k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" testutils "k8s.io/kubernetes/test/utils" ) -var _ = SIGDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector] [Serial]", func() { +var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, framework.WithSerial(), func() { const ( pollInterval = 1 * time.Second pollConsistent = 5 * time.Second diff --git a/test/e2e_node/node_shutdown_linux_test.go b/test/e2e_node/node_shutdown_linux_test.go index c5d797dc2f8b4..c9fe71aa55a80 100644 --- a/test/e2e_node/node_shutdown_linux_test.go +++ b/test/e2e_node/node_shutdown_linux_test.go @@ -44,6 +44,7 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" "github.com/godbus/dbus/v5" v1 "k8s.io/api/core/v1" @@ -57,7 +58,7 @@ import ( testutils "k8s.io/kubernetes/test/utils" ) -var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShutdown] [NodeFeature:GracefulNodeShutdownBasedOnPodPriority]", func() { +var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.GracefulNodeShutdown, nodefeature.GracefulNodeShutdownBasedOnPodPriority, func() { f := framework.NewDefaultFramework("graceful-node-shutdown") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -83,7 +84,7 @@ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeFeature:GracefulNodeShut } }) - ginkgo.Context("graceful node shutdown when PodDisruptionConditions are enabled [NodeFeature:PodDisruptionConditions]", func() { + f.Context("graceful node shutdown when PodDisruptionConditions are enabled", nodefeature.PodDisruptionConditions, func() { const ( pollInterval = 1 * time.Second diff --git a/test/e2e_node/oomkiller_linux_test.go b/test/e2e_node/oomkiller_linux_test.go index 938f9c0554a1a..db165ffc38e75 100644 --- a/test/e2e_node/oomkiller_linux_test.go +++ b/test/e2e_node/oomkiller_linux_test.go @@ -43,7 +43,7 @@ type testCase struct { // be reserved for K8s components. 
const KubeReservedMemory = 0.35 -var _ = SIGDescribe("OOMKiller for pod using more memory than node allocatable [LinuxOnly] [Serial]", func() { +var _ = SIGDescribe("OOMKiller for pod using more memory than node allocatable [LinuxOnly]", framework.WithSerial(), func() { f := framework.NewDefaultFramework("nodeallocatable-oomkiller-test") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged @@ -61,7 +61,7 @@ var _ = SIGDescribe("OOMKiller for pod using more memory than node allocatable [ } }) -var _ = SIGDescribe("OOMKiller [LinuxOnly] [NodeConformance]", func() { +var _ = SIGDescribe("OOMKiller [LinuxOnly]", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("oomkiller-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/os_label_rename_test.go b/test/e2e_node/os_label_rename_test.go index f13d08e9f4a5a..775ddc6edc4cb 100644 --- a/test/e2e_node/os_label_rename_test.go +++ b/test/e2e_node/os_label_rename_test.go @@ -38,7 +38,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" ) -var _ = SIGDescribe("OSArchLabelReconciliation [Serial] [Slow] [Disruptive]", func() { +var _ = SIGDescribe("OSArchLabelReconciliation", framework.WithSerial(), framework.WithSlow(), framework.WithDisruptive(), func() { f := framework.NewDefaultFramework("node-label-reconciliation") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.Context("Kubelet", func() { diff --git a/test/e2e_node/pids_test.go b/test/e2e_node/pids_test.go index a942eceb37966..adf1f4ad4ec41 100644 --- a/test/e2e_node/pids_test.go +++ b/test/e2e_node/pids_test.go @@ -120,7 +120,7 @@ func runPodPidsLimitTests(f *framework.Framework) { } // Serial because the test updates kubelet configuration. -var _ = SIGDescribe("PodPidsLimit [Serial]", func() { +var _ = SIGDescribe("PodPidsLimit", framework.WithSerial(), func() { f := framework.NewDefaultFramework("pids-limit-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.Context("With config updated with pids limits", func() { diff --git a/test/e2e_node/pod_conditions_test.go b/test/e2e_node/pod_conditions_test.go index a94ccd13825d5..f8eae5511afab 100644 --- a/test/e2e_node/pod_conditions_test.go +++ b/test/e2e_node/pod_conditions_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/kubelet/events" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eevents "k8s.io/kubernetes/test/e2e/framework/events" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -47,7 +48,7 @@ var _ = SIGDescribe("Pod conditions managed by Kubelet", func() { f := framework.NewDefaultFramework("pod-conditions") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline - ginkgo.Context("including PodReadyToStartContainers condition [Serial] [Feature:PodReadyToStartContainersCondition]", func() { + f.Context("including PodReadyToStartContainers condition", f.WithSerial(), feature.PodReadyToStartContainersCondition, func() { tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { initialConfig.FeatureGates = map[string]bool{ string(features.PodReadyToStartContainersCondition): true, diff --git a/test/e2e_node/pod_host_ips.go b/test/e2e_node/pod_host_ips.go index a64dcd5bc76d4..d1e5b1377186c 100644 --- a/test/e2e_node/pod_host_ips.go +++ b/test/e2e_node/pod_host_ips.go @@ -32,6 +32,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" kubefeatures 
"k8s.io/kubernetes/pkg/features" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2enode "k8s.io/kubernetes/test/e2e/framework/node" @@ -39,11 +40,12 @@ import ( e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/network/common" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" ) -var _ = common.SIGDescribe("DualStack Host IP [Serial] [NodeFeature:PodHostIPs] [Feature:PodHostIPs]", func() { +var _ = common.SIGDescribe("DualStack Host IP", framework.WithSerial(), nodefeature.PodHostIPs, feature.PodHostIPs, func() { f := framework.NewDefaultFramework("dualstack") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/pod_hostnamefqdn_test.go b/test/e2e_node/pod_hostnamefqdn_test.go index 836e3052e2689..4d6c1dbd24776 100644 --- a/test/e2e_node/pod_hostnamefqdn_test.go +++ b/test/e2e_node/pod_hostnamefqdn_test.go @@ -73,7 +73,7 @@ func testPod(podnamebase string) *v1.Pod { return pod } -var _ = SIGDescribe("Hostname of Pod [NodeConformance]", func() { +var _ = SIGDescribe("Hostname of Pod", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("hostfqdn") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline /* diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go index 14b301d763bbe..7778328a33deb 100644 --- a/test/e2e_node/podresources_test.go +++ b/test/e2e_node/podresources_test.go @@ -43,11 +43,13 @@ import ( "github.com/onsi/gomega" "github.com/onsi/gomega/gstruct" "github.com/onsi/gomega/types" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" ) const ( @@ -619,7 +621,7 @@ func podresourcesGetTests(ctx context.Context, f *framework.Framework, cli kubel } // Serial because the test updates kubelet configuration. 
-var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:PodResources]", func() { +var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResources, nodefeature.PodResources, func() { f := framework.NewDefaultFramework("podresources-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -892,7 +894,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P }) }) - ginkgo.Context("when querying /metrics [NodeConformance]", func() { + f.Context("when querying /metrics", f.WithNodeConformance(), func() { tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) { if initialConfig.FeatureGates == nil { initialConfig.FeatureGates = make(map[string]bool) diff --git a/test/e2e_node/pods_container_manager_test.go b/test/e2e_node/pods_container_manager_test.go index a9a09455e7717..cc815c438b1b5 100644 --- a/test/e2e_node/pods_container_manager_test.go +++ b/test/e2e_node/pods_container_manager_test.go @@ -170,7 +170,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { ginkgo.Describe("QOS containers", func() { ginkgo.Context("On enabling QOS cgroup hierarchy", func() { - ginkgo.It("Top level QoS containers should have been created [NodeConformance]", func(ctx context.Context) { + f.It("Top level QoS containers should have been created", f.WithNodeConformance(), func(ctx context.Context) { if !kubeletCfg.CgroupsPerQOS { return } @@ -183,7 +183,7 @@ var _ = SIGDescribe("Kubelet Cgroup Manager", func() { }) }) - ginkgo.Describe("Pod containers [NodeConformance]", func() { + f.Describe("Pod containers", f.WithNodeConformance(), func() { ginkgo.Context("On scheduling a Guaranteed Pod", func() { ginkgo.It("Pod containers should have been created under the cgroup-root", func(ctx context.Context) { if !kubeletCfg.CgroupsPerQOS { diff --git a/test/e2e_node/pods_lifecycle_termination_test.go b/test/e2e_node/pods_lifecycle_termination_test.go index 804a6b546cc74..eeda9f3d561a9 100644 --- a/test/e2e_node/pods_lifecycle_termination_test.go +++ b/test/e2e_node/pods_lifecycle_termination_test.go @@ -35,7 +35,7 @@ import ( // Pod sigkill test will cover pods with graceful termination period set but failed // to terminate and forcefully killed by kubelet. This test examine pod's container's // exit code is 137 and the exit reason is `Error` -var _ = SIGDescribe("Pod SIGKILL [LinuxOnly] [NodeConformance]", func() { +var _ = SIGDescribe("Pod SIGKILL [LinuxOnly]", framework.WithNodeConformance(), func() { f := framework.NewDefaultFramework("sigkill-test") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline diff --git a/test/e2e_node/quota_lsci_test.go b/test/e2e_node/quota_lsci_test.go index 0b198a8f2f2e9..f7061cfb22b1f 100644 --- a/test/e2e_node/quota_lsci_test.go +++ b/test/e2e_node/quota_lsci_test.go @@ -28,8 +28,10 @@ import ( "k8s.io/kubernetes/pkg/features" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/volume/util/fsquota" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" "k8s.io/mount-utils" admissionapi "k8s.io/pod-security-admission/api" @@ -96,7 +98,7 @@ func runOneQuotaTest(f *framework.Framework, quotasRequested bool) { // pod that creates a file, deletes it, and writes data to it. 
If // quotas are used to monitor, it will detect this deleted-but-in-use // file; if du is used to monitor, it will not detect this. -var _ = SIGDescribe("LocalStorageCapacityIsolationFSQuotaMonitoring [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolationQuota][NodeFeature:LSCIQuotaMonitoring]", func() { +var _ = SIGDescribe("LocalStorageCapacityIsolationFSQuotaMonitoring", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, nodefeature.LSCIQuotaMonitoring, func() { f := framework.NewDefaultFramework("localstorage-quota-monitoring-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged runOneQuotaTest(f, true) diff --git a/test/e2e_node/resource_metrics_test.go b/test/e2e_node/resource_metrics_test.go index 542d0cce1e98f..37139a40dd403 100644 --- a/test/e2e_node/resource_metrics_test.go +++ b/test/e2e_node/resource_metrics_test.go @@ -27,6 +27,7 @@ import ( e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" + "k8s.io/kubernetes/test/e2e/nodefeature" admissionapi "k8s.io/pod-security-admission/api" "github.com/prometheus/common/model" @@ -43,7 +44,7 @@ const ( maxStatsAge = time.Minute ) -var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() { +var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, func() { f := framework.NewDefaultFramework("resource-metrics") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.Context("when querying /resource/metrics", func() { diff --git a/test/e2e_node/resource_usage_test.go b/test/e2e_node/resource_usage_test.go index ca0b743bf7580..c1ce59e1bce00 100644 --- a/test/e2e_node/resource_usage_test.go +++ b/test/e2e_node/resource_usage_test.go @@ -37,7 +37,7 @@ import ( "github.com/onsi/ginkgo/v2" ) -var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() { +var _ = SIGDescribe("Resource-usage", framework.WithSerial(), framework.WithSlow(), func() { const ( // Interval to poll /stats/container on a node containerStatsPollingPeriod = 10 * time.Second diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go index 5fb8f9c5846f2..aa8194f44e9e5 100644 --- a/test/e2e_node/restart_test.go +++ b/test/e2e_node/restart_test.go @@ -72,7 +72,7 @@ func waitForPodsCondition(ctx context.Context, f *framework.Framework, podCount return runningPods } -var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() { +var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), framework.WithDisruptive(), func() { const ( // Saturate the node. 
It's not necessary that all these pods enter // Running/Ready, because we don't know the number of cores in the diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 70566764f5e05..0aa256d40030f 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -68,7 +68,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() { }, } { testCase := testCase - ginkgo.It(testCase.description+" [NodeConformance]", func(ctx context.Context) { + f.It(testCase.description+"", f.WithNodeConformance(), func(ctx context.Context) { name := "image-pull-test" command := []string{"/bin/sh", "-c", "while true; do sleep 1; done"} container := node.ConformanceContainer{ diff --git a/test/e2e_node/seccompdefault_test.go b/test/e2e_node/seccompdefault_test.go index b1828a583edcd..2f0792dd2a8c5 100644 --- a/test/e2e_node/seccompdefault_test.go +++ b/test/e2e_node/seccompdefault_test.go @@ -30,6 +30,7 @@ import ( admissionapi "k8s.io/pod-security-admission/api" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" ) @@ -41,7 +42,7 @@ const SeccompProcStatusField = "Seccomp:" const ProcSelfStatusPath = "/proc/self/status" // Serial because the test updates kubelet configuration. -var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly]", func() { +var _ = SIGDescribe("SeccompDefault", framework.WithSerial(), feature.SeccompDefault, "[LinuxOnly]", func() { f := framework.NewDefaultFramework("seccompdefault-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go index ef0de37905796..fb14283fcf1fe 100644 --- a/test/e2e_node/security_context_test.go +++ b/test/e2e_node/security_context_test.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" @@ -43,7 +44,7 @@ var _ = SIGDescribe("Security Context", func() { podClient = e2epod.NewPodClient(f) }) - ginkgo.Context("[NodeConformance][LinuxOnly] Container PID namespace sharing", func() { + f.Context(framework.WithNodeConformance(), "[LinuxOnly] Container PID namespace sharing", func() { ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func(ctx context.Context) { ginkgo.By("Create a pod with isolated PID namespaces.") e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ @@ -147,7 +148,7 @@ var _ = SIGDescribe("Security Context", func() { nginxPid = strings.TrimSpace(output) }) - ginkgo.It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func(ctx context.Context) { + f.It("should show its pid in the host PID namespace", nodefeature.HostAccess, func(ctx context.Context) { busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID()) createAndWaitHostPidPod(ctx, busyboxPodName, true) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) @@ -167,7 +168,7 @@ var _ = SIGDescribe("Security Context", func() { } }) - ginkgo.It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func(ctx context.Context) { + f.It("should not show its pid in the 
diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go
index ef0de37905796..fb14283fcf1fe 100644
--- a/test/e2e_node/security_context_test.go
+++ b/test/e2e_node/security_context_test.go
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	"k8s.io/kubernetes/test/e2e/nodefeature"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"

@@ -43,7 +44,7 @@ var _ = SIGDescribe("Security Context", func() {
 		podClient = e2epod.NewPodClient(f)
 	})

-	ginkgo.Context("[NodeConformance][LinuxOnly] Container PID namespace sharing", func() {
+	f.Context(framework.WithNodeConformance(), "[LinuxOnly] Container PID namespace sharing", func() {
 		ginkgo.It("containers in pods using isolated PID namespaces should all receive PID 1", func(ctx context.Context) {
 			ginkgo.By("Create a pod with isolated PID namespaces.")
 			e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
@@ -147,7 +148,7 @@ var _ = SIGDescribe("Security Context", func() {
 			nginxPid = strings.TrimSpace(output)
 		})

-		ginkgo.It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func(ctx context.Context) {
+		f.It("should show its pid in the host PID namespace", nodefeature.HostAccess, func(ctx context.Context) {
 			busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(ctx, busyboxPodName, true)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -167,7 +168,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})

-		ginkgo.It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func(ctx context.Context) {
+		f.It("should not show its pid in the non-hostpid containers", nodefeature.HostAccess, func(ctx context.Context) {
 			busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(ctx, busyboxPodName, false)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -223,7 +224,7 @@ var _ = SIGDescribe("Security Context", func() {
 			framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
 		})

-		ginkgo.It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func(ctx context.Context) {
+		f.It("should show the shared memory ID in the host IPC containers", nodefeature.HostAccess, func(ctx context.Context) {
 			ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ctx, ipcutilsPodName, true)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -238,7 +239,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})

-		ginkgo.It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func(ctx context.Context) {
+		f.It("should not show the shared memory ID in the non-hostIPC containers", nodefeature.HostAccess, func(ctx context.Context) {
 			ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ctx, ipcutilsPodName, false)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -306,7 +307,7 @@ var _ = SIGDescribe("Security Context", func() {
 			framework.Logf("Opened a new tcp port %q", listeningPort)
 		})

-		ginkgo.It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func(ctx context.Context) {
+		f.It("should listen on same port in the host network containers", nodefeature.HostAccess, func(ctx context.Context) {
 			busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(ctx, busyboxPodName, true)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -320,7 +321,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})

-		ginkgo.It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func(ctx context.Context) {
+		f.It("shouldn't show the same port in the non-hostnetwork containers", nodefeature.HostAccess, func(ctx context.Context) {
 			busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(ctx, busyboxPodName, false)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
diff --git a/test/e2e_node/standalone_test.go b/test/e2e_node/standalone_test.go
index 190a92e68440f..72eb293823f54 100644
--- a/test/e2e_node/standalone_test.go
+++ b/test/e2e_node/standalone_test.go
@@ -34,6 +34,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/cli-runtime/pkg/printers"
 	"k8s.io/kubernetes/pkg/cluster/ports"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -44,7 +45,7 @@ import (
 	testutils "k8s.io/kubernetes/test/utils"
 )

-var _ = SIGDescribe("[Feature:StandaloneMode]", func() {
+var _ = SIGDescribe(feature.StandaloneMode, func() {
 	f := framework.NewDefaultFramework("static-pod")
 	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
 	ginkgo.Context("when creating a static pod", func() {
diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go
index 774b28003fc96..067a6f4877b76 100644
--- a/test/e2e_node/summary_test.go
+++ b/test/e2e_node/summary_test.go
@@ -40,7 +40,7 @@ import (
 	"github.com/onsi/gomega/types"
 )

-var _ = SIGDescribe("Summary API [NodeConformance]", func() {
+var _ = SIGDescribe("Summary API", framework.WithNodeConformance(), func() {
 	f := framework.NewDefaultFramework("summary-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	ginkgo.Context("when querying /stats/summary", func() {
diff --git a/test/e2e_node/swap_test.go b/test/e2e_node/swap_test.go
index 7df97af920c37..dbc8228be3e46 100644
--- a/test/e2e_node/swap_test.go
+++ b/test/e2e_node/swap_test.go
@@ -19,6 +19,9 @@ package e2enode
 import (
 	"context"
 	"fmt"
+	"path/filepath"
+	"strconv"
+
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	v1 "k8s.io/api/core/v1"
@@ -32,8 +35,6 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"
 	admissionapi "k8s.io/pod-security-admission/api"
-	"path/filepath"
-	"strconv"
 )

 const (
@@ -43,7 +44,7 @@ const (
 	cgroupV1MemLimitFile = "/memory/memory.limit_in_bytes"
 )

-var _ = SIGDescribe("Swap [NodeConformance][LinuxOnly]", func() {
+var _ = SIGDescribe("Swap", framework.WithNodeConformance(), "[LinuxOnly]", func() {
 	f := framework.NewDefaultFramework("swap-test")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
diff --git a/test/e2e_node/system_node_critical_test.go b/test/e2e_node/system_node_critical_test.go
index 61edcfd2fd5c3..d4a314e03bfbd 100644
--- a/test/e2e_node/system_node_critical_test.go
+++ b/test/e2e_node/system_node_critical_test.go
@@ -29,13 +29,14 @@ import (
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/nodefeature"
 	admissionapi "k8s.io/pod-security-admission/api"

 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 )

-var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFeature:SystemNodeCriticalPod]", func() {
+var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.SystemNodeCriticalPod, func() {
 	f := framework.NewDefaultFramework("system-node-critical-pod-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	// this test only manipulates pods in kube-system
diff --git a/test/e2e_node/topology_manager_metrics_test.go b/test/e2e_node/topology_manager_metrics_test.go
index fe17ad9c9fcf8..ce985ec804b31 100644
--- a/test/e2e_node/topology_manager_metrics_test.go
+++ b/test/e2e_node/topology_manager_metrics_test.go
@@ -28,13 +28,14 @@ import (
 	v1 "k8s.io/api/core/v1"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	admissionapi "k8s.io/pod-security-admission/api"
 )

-var _ = SIGDescribe("Topology Manager Metrics [Serial] [Feature:TopologyManager]", func() {
+var _ = SIGDescribe("Topology Manager Metrics", framework.WithSerial(), feature.TopologyManager, func() {
 	f := framework.NewDefaultFramework("topologymanager-metrics")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
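Summary API, Swap, SystemNodeCriticalPod and Topology Manager Metrics above all follow the same mechanical rewrite. The net effect of the framework helpers is presumably equivalent to attaching a ginkgo v2 Label while keeping the familiar bracketed tag in the spec text, so both label filters and the older -focus/-skip regexes keep selecting these suites; in plain ginkgo terms (an assumption about the effect, not the framework's actual implementation):

// Assumed ginkgo-v2 equivalent of what the framework label APIs produce for
// one of the suites above; not taken from the framework's code.
package e2enode

import (
	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("Topology Manager Metrics [Serial] [Feature:TopologyManager]",
	ginkgo.Label("Serial", "Feature:TopologyManager"),
	func() {
		ginkgo.It("remains selectable by label filter or by text regex", func() {
			// e.g. --ginkgo.label-filter='Serial' or --ginkgo.focus='\[Serial\]'
		})
	})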
diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go
index eddf264aa2bc8..2ca30b9bb4c6f 100644
--- a/test/e2e_node/topology_manager_test.go
+++ b/test/e2e_node/topology_manager_test.go
@@ -36,6 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	admissionapi "k8s.io/pod-security-admission/api"

+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -954,7 +955,7 @@ func hostPrecheck() (int, int) {
 }

 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("Topology Manager [Serial] [Feature:TopologyManager]", func() {
+var _ = SIGDescribe("Topology Manager", framework.WithSerial(), feature.TopologyManager, func() {
 	f := framework.NewDefaultFramework("topology-manager-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
diff --git a/test/e2e_node/unknown_pods_test.go b/test/e2e_node/unknown_pods_test.go
index 301cb57fd0d3c..77e72f3525dee 100644
--- a/test/e2e_node/unknown_pods_test.go
+++ b/test/e2e_node/unknown_pods_test.go
@@ -44,7 +44,7 @@ import (
  * runtime, but it will not be present in the config, thus making the pod a
  * "unknown pod". Kubelet should then proceed to terminate these unknown pods.
  */
-var _ = SIGDescribe("Unknown Pods [Serial] [Disruptive]", func() {
+var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisruptive(), func() {
 	f := framework.NewDefaultFramework("unknown-pods")
 	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
diff --git a/test/e2e_node/volume_manager_test.go b/test/e2e_node/volume_manager_test.go
index 9c98bfcf78f3f..38a1ba5418534 100644
--- a/test/e2e_node/volume_manager_test.go
+++ b/test/e2e_node/volume_manager_test.go
@@ -37,7 +37,7 @@ var _ = SIGDescribe("Kubelet Volume Manager", func() {
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	ginkgo.Describe("Volume Manager", func() {
 		ginkgo.Context("On termination of pod with memory backed volume", func() {
-			ginkgo.It("should remove the volume from the node [NodeConformance]", func(ctx context.Context) {
+			f.It("should remove the volume from the node", f.WithNodeConformance(), func(ctx context.Context) {
 				var (
 					memoryBackedPod *v1.Pod
 					volumeName      string