e2e: use framework labels
This changes the test registration so that, for tags where the framework has a
dedicated API (features, feature gates, slow, serial, etc.), those APIs are
used.

Arbitrary, custom tags are still left in place for now.
pohly committed Nov 1, 2023
1 parent 833156b commit f2cfbf4
Showing 230 changed files with 834 additions and 680 deletions.
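
For example, condensed from the test/e2e/apimachinery/namespace.go hunk below, tags that used to be spliced into the description strings are now passed as label arguments produced by the dedicated APIs:

// Before: tags embedded in the description text.
var _ = SIGDescribe("Namespaces [Serial]", func() {
	ginkgo.It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]", func(ctx context.Context) {
		extinguish(ctx, f, 100, 0, 150)
	})
})

// After: the same tags are produced by framework.WithSerial() and the feature package.
var _ = SIGDescribe("Namespaces", framework.WithSerial(), func() {
	f.It("should always delete fast (ALL of 100 namespaces in 150 seconds)", feature.ComprehensiveNamespaceDraining, func(ctx context.Context) {
		extinguish(ctx, f, 100, 0, 150)
	})
})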
2 changes: 1 addition & 1 deletion test/conformance/testdata/conformance.yaml
@@ -3035,7 +3035,7 @@
file: test/e2e/storage/persistent_volumes.go
- testname: Projected Volume, multiple projections
codename: '[sig-storage] Projected combined should project all components that make
up the projection API [Projection][NodeConformance] [Conformance]'
up the projection API [Projection] [NodeConformance] [Conformance]'
description: A Pod is created with a projected volume source for secrets, configMap
and downwardAPI with pod name, cpu and memory limits and cpu and memory requests.
Pod MUST be able to read the secrets, configMap values and the cpu and memory
2 changes: 1 addition & 1 deletion test/e2e/README.md
@@ -59,7 +59,7 @@ import (
"github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/lifecycle"
)
var _ = lifecycle.SIGDescribe("[Feature:BootstrapTokens]", func() {
var _ = lifecycle.SIGDescribe("cluster", feature.BootstrapTokens, func() {
/* ... */
ginkgo.It("should sign the new added bootstrap tokens", func(ctx context.Context) {
/* ... */
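
The feature.* labels in the new-style registrations (for example feature.BootstrapTokens above) come from the test/e2e/feature package imported throughout the hunks below. That package is not visible in this diff; the following is only a sketch of how such a constant might be declared, assuming the framework exposes a WithFeature helper and a ValidFeatures name registry:

// Hypothetical excerpt of test/e2e/feature/feature.go (not part of the hunks shown here).
package feature

import "k8s.io/kubernetes/test/e2e/framework"

var (
	// BootstrapTokens labels tests that exercise cluster bootstrap tokens.
	// The label is assumed to render in the test name as the familiar
	// "[Feature:BootstrapTokens]" tag, so existing job selectors keep working.
	BootstrapTokens = framework.WithFeature(framework.ValidFeatures.Add("BootstrapTokens"))
)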
6 changes: 3 additions & 3 deletions test/e2e/apimachinery/apiserver_identity.go
@@ -26,13 +26,13 @@ import (
"strings"
"time"

"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"golang.org/x/crypto/cryptobyte"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -80,11 +80,11 @@ func restartAPIServer(ctx context.Context, node *v1.Node) error {
}

// This test requires that --feature-gates=APIServerIdentity=true be set on the apiserver
var _ = SIGDescribe("kube-apiserver identity [Feature:APIServerIdentity]", func() {
var _ = SIGDescribe("kube-apiserver identity", feature.APIServerIdentity, func() {
f := framework.NewDefaultFramework("kube-apiserver-identity")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

ginkgo.It("kube-apiserver identity should persist after restart [Disruptive]", func(ctx context.Context) {
f.It("kube-apiserver identity should persist after restart", f.WithDisruptive(), func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce")

client := f.ClientSet
2 changes: 1 addition & 1 deletion test/e2e/apimachinery/chunking.go
@@ -141,7 +141,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
MUST return the remaining items in chunks of the size not exceeding the limit, with appropriately
set RemainingItems field in the response and with the ResourceVersion returned as part of the inconsistent list.
*/
framework.ConformanceIt("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent [Slow]", func(ctx context.Context) {
framework.ConformanceIt("should support continue listing from the last key if the original version has been compacted away, though the list is inconsistent", f.WithSlow(), func(ctx context.Context) {
ns := f.Namespace.Name
c := f.ClientSet
client := c.CoreV1().PodTemplates(ns)
2 changes: 1 addition & 1 deletion test/e2e/apimachinery/etcd_failure.go
@@ -36,7 +36,7 @@ import (
"github.com/onsi/ginkgo/v2"
)

var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
var _ = SIGDescribe("Etcd failure", framework.WithDisruptive(), func() {

f := framework.NewDefaultFramework("etcd-failure")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
5 changes: 3 additions & 2 deletions test/e2e/apimachinery/namespace.go
@@ -36,6 +36,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -231,7 +232,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(ctx context.Context, f *fram
// that each have a variable amount of content in the associated Namespace.
// When run in [Serial] this test appears to delete Namespace objects at a
// rate of approximately 1 per second.
var _ = SIGDescribe("Namespaces [Serial]", func() {
var _ = SIGDescribe("Namespaces", framework.WithSerial(), func() {

f := framework.NewDefaultFramework("namespaces")
f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
@@ -259,7 +260,7 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
})

// On hold until etcd3; see #7372
ginkgo.It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]", func(ctx context.Context) {
f.It("should always delete fast (ALL of 100 namespaces in 150 seconds)", feature.ComprehensiveNamespaceDraining, func(ctx context.Context) {
extinguish(ctx, f, 100, 0, 150)
})

5 changes: 3 additions & 2 deletions test/e2e/apimachinery/resource_quota.go
@@ -46,6 +46,7 @@ import (
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/utils/crd"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -1203,7 +1204,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
})
})

var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
var _ = SIGDescribe("ResourceQuota", feature.ScopeSelectors, func() {
f := framework.NewDefaultFramework("scope-selectors")
f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func(ctx context.Context) {
@@ -1384,7 +1385,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
})
})

var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
var _ = SIGDescribe("ResourceQuota", feature.PodPriority, func() {
f := framework.NewDefaultFramework("resourcequota-priorityclass")
f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

3 changes: 2 additions & 1 deletion test/e2e/apimachinery/storage_version.go
@@ -24,6 +24,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
admissionapi "k8s.io/pod-security-admission/api"

@@ -36,7 +37,7 @@ const (
)

// This test requires that --feature-gates=APIServerIdentity=true,StorageVersionAPI=true be set on the apiserver and the controller manager
var _ = SIGDescribe("StorageVersion resources [Feature:StorageVersionAPI]", func() {
var _ = SIGDescribe("StorageVersion resources", feature.StorageVersionAPI, func() {
f := framework.NewDefaultFramework("storage-version")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

3 changes: 2 additions & 1 deletion test/e2e/apimachinery/watchlist.go
@@ -32,10 +32,11 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
)

var _ = SIGDescribe("API Streaming (aka. WatchList) [Serial] [Feature:WatchList]", func() {
var _ = SIGDescribe("API Streaming (aka. WatchList)", framework.WithSerial(), feature.WatchList, func() {
f := framework.NewDefaultFramework("watchlist")
ginkgo.It("should be requested when ENABLE_CLIENT_GO_WATCH_LIST_ALPHA is set", func(ctx context.Context) {
prevWatchListEnvValue, wasWatchListEnvSet := os.LookupEnv("ENABLE_CLIENT_GO_WATCH_LIST_ALPHA")
2 changes: 1 addition & 1 deletion test/e2e/apps/controller_revision.go
@@ -54,7 +54,7 @@ const (
// happen. In the future, running in parallel may work if we have an eviction
// model which lets the DS controller kick out other pods to make room.
// See https://issues.k8s.io/21767 for more details
var _ = SIGDescribe("ControllerRevision [Serial]", func() {
var _ = SIGDescribe("ControllerRevision", framework.WithSerial(), func() {
var f *framework.Framework

ginkgo.AfterEach(func(ctx context.Context) {
4 changes: 2 additions & 2 deletions test/e2e/apps/cronjob.go
@@ -94,7 +94,7 @@ var _ = SIGDescribe("CronJob", func() {
Testname: CronJob Suspend
Description: CronJob MUST support suspension, which suppresses creation of new jobs.
*/
framework.ConformanceIt("should not schedule jobs when suspended [Slow]", func(ctx context.Context) {
framework.ConformanceIt("should not schedule jobs when suspended", f.WithSlow(), func(ctx context.Context) {
ginkgo.By("Creating a suspended cronjob")
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batchv1.AllowConcurrent,
sleepCommand, nil, nil)
@@ -122,7 +122,7 @@ var _ = SIGDescribe("CronJob", func() {
Testname: CronJob ForbidConcurrent
Description: CronJob MUST support ForbidConcurrent policy, allowing to run single, previous job at the time.
*/
framework.ConformanceIt("should not schedule new jobs when ForbidConcurrent [Slow]", func(ctx context.Context) {
framework.ConformanceIt("should not schedule new jobs when ForbidConcurrent", f.WithSlow(), func(ctx context.Context) {
ginkgo.By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1.ForbidConcurrent,
sleepCommand, nil, nil)
2 changes: 1 addition & 1 deletion test/e2e/apps/daemon_restart.go
@@ -205,7 +205,7 @@ func getContainerRestarts(ctx context.Context, c clientset.Interface, ns string,
return failedContainers, containerRestartNodes.List()
}

var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
var _ = SIGDescribe("DaemonRestart", framework.WithDisruptive(), func() {

f := framework.NewDefaultFramework("daemonrestart")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
2 changes: 1 addition & 1 deletion test/e2e/apps/daemon_set.go
@@ -116,7 +116,7 @@ func updateDaemonSetWithRetries(ctx context.Context, c clientset.Interface, name
// happen. In the future, running in parallel may work if we have an eviction
// model which lets the DS controller kick out other pods to make room.
// See https://issues.k8s.io/21767 for more details
var _ = SIGDescribe("Daemon set [Serial]", func() {
var _ = SIGDescribe("Daemon set", framework.WithSerial(), func() {
var f *framework.Framework

ginkgo.AfterEach(func(ctx context.Context) {
8 changes: 4 additions & 4 deletions test/e2e/apps/disruption.go
@@ -283,11 +283,11 @@ var _ = SIGDescribe("DisruptionController", func() {
// tests with exclusive set to true relies on HostPort to make sure
// only one pod from the replicaset is assigned to each node. This
// requires these tests to be run serially.
var serial string
args := []interface{}{fmt.Sprintf("evictions: %s => %s", c.description, expectation)}
if c.exclusive {
serial = " [Serial]"
args = append(args, framework.WithSerial())
}
ginkgo.It(fmt.Sprintf("evictions: %s => %s%s", c.description, expectation, serial), func(ctx context.Context) {
f.It(append(args, func(ctx context.Context) {
if c.skipForBigClusters {
e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
}
@@ -338,7 +338,7 @@ var _ = SIGDescribe("DisruptionController", func() {
})
framework.ExpectNoError(err)
}
})
})...)
}

/*
2 changes: 1 addition & 1 deletion test/e2e/apps/job.go
@@ -652,7 +652,7 @@ done`}
}
})

ginkgo.It("should run a job to completion with CPU requests [Serial]", func(ctx context.Context) {
f.It("should run a job to completion with CPU requests", f.WithSerial(), func(ctx context.Context) {
ginkgo.By("Creating a job that with CPU requests")

testNodeName := scheduling.GetNodeThatCanRunPod(ctx, f)
2 changes: 1 addition & 1 deletion test/e2e/apps/replica_set.go
@@ -498,7 +498,7 @@ func testRSLifeCycle(ctx context.Context, f *framework.Framework) {
framework.ExpectNoError(err, "Failed to create pods: %s", err)

// Scale the ReplicaSet
ginkgo.By(fmt.Sprintf("Scaling up %q replicaset ", rsName))
ginkgo.By(fmt.Sprintf("Scaling up %q replicaset", rsName))
_, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(update *appsv1.ReplicaSet) {
x := int32(2)
update.Spec.Replicas = &x
9 changes: 5 additions & 4 deletions test/e2e/apps/statefulset.go
@@ -47,6 +47,7 @@ import (
"k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -637,7 +638,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Testname: StatefulSet, Scaling
Description: StatefulSet MUST create Pods in ascending order by ordinal index when scaling up, and delete Pods in descending order when scaling down. Scaling up or down MUST pause if any Pods belonging to the StatefulSet are unhealthy. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func(ctx context.Context) {
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", f.WithSlow(), func(ctx context.Context) {
psLabels := klabels.Set(labels)
w := &cache.ListWatch{
WatchFunc: func(options metav1.ListOptions) (i watch.Interface, e error) {
@@ -747,7 +748,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Testname: StatefulSet, Burst Scaling
Description: StatefulSet MUST support the Parallel PodManagementPolicy for burst scaling. This test does not depend on a preexisting default StorageClass or a dynamic provisioner.
*/
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods [Slow]", func(ctx context.Context) {
framework.ConformanceIt("Burst scaling should run to completion even with unhealthy pods", f.WithSlow(), func(ctx context.Context) {
psLabels := klabels.Set(labels)

ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
@@ -1154,7 +1155,7 @@ var _ = SIGDescribe("StatefulSet", func() {
})
})

ginkgo.Describe("Deploy clustered applications [Feature:StatefulSet] [Slow]", func() {
f.Describe("Deploy clustered applications", feature.StatefulSet, framework.WithSlow(), func() {
var appTester *clusterAppTester

ginkgo.BeforeEach(func(ctx context.Context) {
@@ -1424,7 +1425,7 @@ var _ = SIGDescribe("StatefulSet", func() {
e2estatefulset.DeleteAllStatefulSets(ctx, c, ns)
})

ginkgo.It("PVC should be recreated when pod is pending due to missing PVC [Disruptive][Serial]", func(ctx context.Context) {
f.It("PVC should be recreated when pod is pending due to missing PVC", f.WithDisruptive(), f.WithSerial(), func(ctx context.Context) {
e2epv.SkipIfNoDefaultStorageClass(ctx, c)

readyNode, err := e2enode.GetRandomReadySchedulableNode(ctx, c)
3 changes: 2 additions & 1 deletion test/e2e/auth/node_authn.go
@@ -25,6 +25,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -35,7 +36,7 @@ import (
"github.com/onsi/gomega"
)

var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
var _ = SIGDescribe(feature.NodeAuthenticator, func() {

f := framework.NewDefaultFramework("node-authn")
f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
3 changes: 2 additions & 1 deletion test/e2e/auth/node_authz.go
@@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
@@ -41,7 +42,7 @@ const (
nodeNamePrefix = "system:node:"
)

var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
var _ = SIGDescribe(feature.NodeAuthorizer, func() {

f := framework.NewDefaultFramework("node-authz")
f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
5 changes: 3 additions & 2 deletions test/e2e/auth/service_accounts.go
@@ -43,6 +43,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/nodefeature"
imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api"
utilptr "k8s.io/utils/pointer"
@@ -334,7 +335,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Containers MUST verify that the projected service account token can be
read and has correct file mode set including ownership and permission.
*/
ginkgo.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup]", func(ctx context.Context) {
f.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
e2eskipper.SkipIfNodeOSDistroIs("windows")

var (
@@ -430,7 +431,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
})

ginkgo.It("should support InClusterConfig with token rotation [Slow]", func(ctx context.Context) {
f.It("should support InClusterConfig with token rotation", f.WithSlow(), func(ctx context.Context) {
tenMin := int64(10 * 60)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "inclusterclient"},
3 changes: 2 additions & 1 deletion test/e2e/autoscaling/autoscaling_timer.go
@@ -23,6 +23,7 @@ import (

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -34,7 +35,7 @@ import (
"github.com/onsi/gomega/gmeasure"
)

var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling", func() {
var _ = SIGDescribe(feature.ClusterSizeAutoscalingScaleUp, framework.WithSlow(), "Autoscaling", func() {
f := framework.NewDefaultFramework("autoscaling")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
var experiment *gmeasure.Experiment
(The remaining changed files are not shown here.)
