Fix controller typos
NathanBaulch committed Sep 30, 2024
1 parent 1bbe775 commit 033b774
Showing 56 changed files with 113 additions and 113 deletions.
2 changes: 1 addition & 1 deletion pkg/controller/bootstrap/doc.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-// Package bootstrap provides automatic processes necessary for bootstraping.
+// Package bootstrap provides automatic processes necessary for bootstrapping.
// This includes managing and expiring tokens along with signing well known
// configmaps with those tokens.
package bootstrap // import "k8s.io/kubernetes/pkg/controller/bootstrap"
12 changes: 6 additions & 6 deletions pkg/controller/certificates/approver/sarapprove_test.go
@@ -173,10 +173,10 @@ func TestRecognizers(t *testing.T) {
testRecognizer(t, badCases, isNodeClientCert, false)
testRecognizer(t, badCases, isSelfNodeClientCert, false)

-// cn different then requestor
+// cn different then requester
differentCN := []func(b *csrBuilder){
func(b *csrBuilder) {
-b.requestor = "joe"
+b.requester = "joe"
},
func(b *csrBuilder) {
b.cn = "system:node:bar"
@@ -193,7 +193,7 @@ func testRecognizer(t *testing.T, cases []func(b *csrBuilder), recognizeFunc fun
signerName: capi.KubeAPIServerClientKubeletSignerName,
cn: "system:node:foo",
orgs: []string{"system:nodes"},
-requestor: "system:node:foo",
+requester: "system:node:foo",
usages: []capi.KeyUsage{
capi.UsageKeyEncipherment,
capi.UsageDigitalSignature,
@@ -216,7 +216,7 @@ func testRecognizer(t *testing.T, cases []func(b *csrBuilder), recognizeFunc fun
signerName: capi.KubeAPIServerClientKubeletSignerName,
cn: "system:node:foo",
orgs: []string{"system:nodes"},
-requestor: "system:node:foo",
+requester: "system:node:foo",
usages: []capi.KeyUsage{
capi.UsageDigitalSignature,
capi.UsageClientAuth,
@@ -247,7 +247,7 @@ func makeTestCsr() *capi.CertificateSigningRequest {
type csrBuilder struct {
cn string
orgs []string
-requestor string
+requester string
usages []capi.KeyUsage
dns []string
emails []string
@@ -274,7 +274,7 @@ func makeFancyTestCsr(b csrBuilder) *capi.CertificateSigningRequest {
}
return &capi.CertificateSigningRequest{
Spec: capi.CertificateSigningRequestSpec{
-Username: b.requestor,
+Username: b.requester,
Usages: b.usages,
SignerName: b.signerName,
Request: pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: csrb}),
2 changes: 1 addition & 1 deletion pkg/controller/certificates/cleaner/cleaner_test.go
@@ -196,7 +196,7 @@ func TestCleanerWithApprovedExpiredCSR(t *testing.T) {
[]string{"delete"},
},
{
"delete approved passed deadline unparseable",
"delete approved passed deadline unparsable",
metav1.NewTime(time.Now().Add(-1 * time.Minute)),
[]byte(`garbage`),
[]capi.CertificateSigningRequestCondition{
8 changes: 4 additions & 4 deletions pkg/controller/cronjob/cronjob_controllerv2.go
@@ -395,8 +395,8 @@ func (jm *ControllerV2) updateCronJob(logger klog.Logger, old interface{}, curr
if err != nil {
// this is likely a user error in defining the spec value
// we should log the error and not reconcile this cronjob until an update to spec
logger.V(2).Info("Unparseable schedule for cronjob", "cronjob", klog.KObj(newCJ), "schedule", newCJ.Spec.Schedule, "err", err)
jm.recorder.Eventf(newCJ, corev1.EventTypeWarning, "UnParseableCronJobSchedule", "unparseable schedule for cronjob: %s", newCJ.Spec.Schedule)
logger.V(2).Info("Unparsable schedule for cronjob", "cronjob", klog.KObj(newCJ), "schedule", newCJ.Spec.Schedule, "err", err)
jm.recorder.Eventf(newCJ, corev1.EventTypeWarning, "UnParseableCronJobSchedule", "unparsable schedule for cronjob: %s", newCJ.Spec.Schedule)
return
}
now := jm.now()
@@ -515,8 +515,8 @@ func (jm *ControllerV2) syncCronJob(
if err != nil {
// this is likely a user error in defining the spec value
// we should log the error and not reconcile this cronjob until an update to spec
logger.V(2).Info("Unparseable schedule", "cronjob", klog.KObj(cronJob), "schedule", cronJob.Spec.Schedule, "err", err)
jm.recorder.Eventf(cronJob, corev1.EventTypeWarning, "UnparseableSchedule", "unparseable schedule: %q : %s", cronJob.Spec.Schedule, err)
logger.V(2).Info("Unparsable schedule", "cronjob", klog.KObj(cronJob), "schedule", cronJob.Spec.Schedule, "err", err)
jm.recorder.Eventf(cronJob, corev1.EventTypeWarning, "UnparsableSchedule", "unparsable schedule: %q : %s", cronJob.Spec.Schedule, err)
return nil, updateStatus, nil
}

2 changes: 1 addition & 1 deletion pkg/controller/cronjob/cronjob_controllerv2_test.go
@@ -1942,7 +1942,7 @@ func TestControllerV2JobAlreadyExistsButNotInActiveStatus(t *testing.T) {
}
}

-// TestControllerV2JobAlreadyExistsButDifferentOwnner validates that an already created job
+// TestControllerV2JobAlreadyExistsButDifferentOwner validates that an already created job
// not owned by the cronjob controller is ignored.
func TestControllerV2JobAlreadyExistsButDifferentOwner(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
2 changes: 1 addition & 1 deletion pkg/controller/cronjob/utils_test.go
@@ -146,7 +146,7 @@ func TestGetJobFromTemplate2(t *testing.T) {
t.Errorf("Wrong timezone location")
}
if len(job.ObjectMeta.Annotations) != 0 && scheduledAnnotation != tt.expectedScheduledTime().Format(time.RFC3339) {
t.Errorf("Wrong cronJob scheduled timestamp annotation, expexted %s, got %s.", tt.expectedScheduledTime().In(timeZoneLocation).Format(time.RFC3339), scheduledAnnotation)
t.Errorf("Wrong cronJob scheduled timestamp annotation, expected %s, got %s.", tt.expectedScheduledTime().In(timeZoneLocation).Format(time.RFC3339), scheduledAnnotation)
}
})
}
2 changes: 1 addition & 1 deletion pkg/controller/daemon/update.go
@@ -114,7 +114,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.Dae
}
}

-// use any of the candidates we can, including the allowedReplacemnntPods
+// use any of the candidates we can, including the allowedReplacementPods
logger.V(5).Info("DaemonSet allowing replacements", "daemonset", klog.KObj(ds), "replacements", len(allowedReplacementPods), "maxUnavailable", maxUnavailable, "numUnavailable", numUnavailable, "candidates", len(candidatePodsToDelete))
remainingUnavailable := maxUnavailable - numUnavailable
if remainingUnavailable < 0 {
2 changes: 1 addition & 1 deletion pkg/controller/daemon/update_test.go
@@ -87,7 +87,7 @@ func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)

-// surge is thhe controlling amount
+// surge is the controlling amount
maxSurge := 2
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
2 changes: 1 addition & 1 deletion pkg/controller/deployment/progress_test.go
@@ -39,7 +39,7 @@ func newDeploymentStatus(replicas, updatedReplicas, availableReplicas int32) app
}
}

-// assumes the retuned deployment is always observed - not needed to be tested here.
+// assumes the returned deployment is always observed - not needed to be tested here.
func currentDeployment(pds *int32, replicas, statusReplicas, updatedReplicas, availableReplicas int32, conditions []apps.DeploymentCondition) *apps.Deployment {
d := &apps.Deployment{
ObjectMeta: metav1.ObjectMeta{
2 changes: 1 addition & 1 deletion pkg/controller/deployment/util/deployment_util.go
@@ -248,7 +248,7 @@ func SetNewReplicaSetAnnotations(ctx context.Context, deployment *apps.Deploymen
logger.Info("Updating replica set revision OldRevision not int", "err", err)
return false
}
-//If the RS annotation is empty then initialise it to 0
+// If the RS annotation is empty then initialise it to 0
oldRevisionInt = 0
}
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
14 changes: 7 additions & 7 deletions pkg/controller/deployment/util/deployment_util_test.go
@@ -1023,21 +1023,21 @@ func TestMaxUnavailable(t *testing.T) {
// Set of simple tests for annotation related util functions
func TestAnnotationUtils(t *testing.T) {

-//Setup
+// Setup
tDeployment := generateDeployment("nginx")
tRS := generateRS(tDeployment)
tDeployment.Annotations[RevisionAnnotation] = "1"

-//Test Case 1: Check if anotations are copied properly from deployment to RS
+// Test Case 1: Check if annotations are copied properly from deployment to RS
t.Run("SetNewReplicaSetAnnotations", func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)

-//Try to set the increment revision from 11 through 20
+// Try to set the increment revision from 11 through 20
for i := 10; i < 20; i++ {

nextRevision := fmt.Sprintf("%d", i+1)
SetNewReplicaSetAnnotations(ctx, &tDeployment, &tRS, nextRevision, true, 5)
-//Now the ReplicaSets Revision Annotation should be i+1
+// Now the ReplicaSets Revision Annotation should be i+1

if i >= 12 {
expectedHistoryAnnotation := fmt.Sprintf("%d,%d", i-1, i)
@@ -1051,7 +1051,7 @@ func TestAnnotationUtils(t *testing.T) {
}
})

-//Test Case 2: Check if annotations are set properly
+// Test Case 2: Check if annotations are set properly
t.Run("SetReplicasAnnotations", func(t *testing.T) {
updated := SetReplicasAnnotations(&tRS, 10, 11)
if !updated {
@@ -1072,7 +1072,7 @@ func TestAnnotationUtils(t *testing.T) {
}
})

-//Test Case 3: Check if annotations reflect deployments state
+// Test Case 3: Check if annotations reflect deployments state
tRS.Annotations[DesiredReplicasAnnotation] = "1"
tRS.Status.AvailableReplicas = 1
tRS.Spec.Replicas = new(int32)
@@ -1084,7 +1084,7 @@ func TestAnnotationUtils(t *testing.T) {
t.Errorf("SetReplicasAnnotations Expected=true Obtained=false")
}
})
-//Tear Down
+// Tear Down
}

func TestReplicasAnnotationsNeedUpdate(t *testing.T) {
2 changes: 1 addition & 1 deletion pkg/controller/disruption/disruption.go
@@ -734,7 +734,7 @@ func (dc *DisruptionController) trySync(ctx context.Context, pdb *policy.PodDisr
dc.recorder.Eventf(pdb, v1.EventTypeWarning, "CalculateExpectedPodCountFailed", "Failed to calculate the number of expected pods: %v", err)
return err
}
-// We have unmamanged pods, instead of erroring and hotlooping in disruption controller, log and continue.
+// We have unmanaged pods, instead of erroring and hotlooping in disruption controller, log and continue.
if len(unmanagedPods) > 0 {
logger.V(4).Info("Found unmanaged pods associated with this PDB", "pods", unmanagedPods)
dc.recorder.Eventf(pdb, v1.EventTypeWarning, "UnmanagedPods", "Pods selected by this PodDisruptionBudget (selector: %v) were found "+
2 changes: 1 addition & 1 deletion pkg/controller/endpoint/endpoints_controller.go
@@ -689,7 +689,7 @@ func truncateEndpoints(endpoints *v1.Endpoints) bool {
numInSubset = len(subset.NotReadyAddresses)
}

-// The number of endpoints per subset will be based on the propotion of endpoints
+// The number of endpoints per subset will be based on the proportion of endpoints
// in this subset versus the total number of endpoints. The proportion of endpoints
// will be rounded up which most likely will lead to the last subset having less
// endpoints than the expected proportion.
@@ -23,7 +23,7 @@ import (
)

// Important! The public back-and-forth conversion functions for the types in
-// this package with EndpointSliceMirroringControllerConfiguratio types need to
+// this package with EndpointSliceMirroringControllerConfiguration types need to
// be manually exposed like this in order for other packages that reference this
// package to be able to call these conversion functions in an autogenerated
// manner. TODO: Fix the bug in conversion-gen so it automatically discovers
2 changes: 1 addition & 1 deletion pkg/controller/endpointslicemirroring/metrics/cache.go
@@ -91,7 +91,7 @@ func (spc *EndpointPortCache) numEndpoints() int {
// given Service and updates the corresponding metrics.
// Parameters:
// * endpointsNN refers to a NamespacedName representing the Endpoints resource.
-// * epCache refers to a EndpointPortCache for the specified Endpoints reosource.
+// * epCache refers to a EndpointPortCache for the specified Endpoints resource.
func (c *Cache) UpdateEndpointPortCache(endpointsNN types.NamespacedName, epCache *EndpointPortCache) {
c.lock.Lock()
defer c.lock.Unlock()
2 changes: 1 addition & 1 deletion pkg/controller/endpointslicemirroring/reconciler_test.go
@@ -1168,7 +1168,7 @@ func expectEndpointSlices(t *testing.T, num, maxEndpointsPerSubset int, endpoint
}

// canonicalize endpoints to match the expected endpoints, otherwise the test
-// that creates more endpoints than allowed fail becaused the list of final
+// that creates more endpoints than allowed fail because the list of final
// endpoints doesn't match.
for _, epSubset := range endpointsv1.RepackSubsets(endpoints.Subsets) {
if len(epSubset.Addresses) == 0 && len(epSubset.NotReadyAddresses) == 0 {
2 changes: 1 addition & 1 deletion pkg/controller/garbagecollector/garbagecollector_test.go
@@ -1468,7 +1468,7 @@ func TestConflictingData(t *testing.T) {
pendingAttemptToDelete: []*node{makeNode(pod1ns1, withOwners(deployment1extensions))},
}),
// 11,12: process attemptToDelete for child
-// final state: child with unresolveable ownerRef remains, queued in pendingAttemptToDelete
+// final state: child with unresolvable ownerRef remains, queued in pendingAttemptToDelete
processAttemptToDelete(1),
assertState(state{
clientActions: []string{
2 changes: 1 addition & 1 deletion pkg/controller/garbagecollector/graph_builder.go
@@ -848,7 +848,7 @@ func (gb *GraphBuilder) processGraphChanges(logger klog.Logger) bool {
if !found || !ownerNode.isDeletingDependents() {
continue
}
-// this is to let attempToDeleteItem check if all the owner's
+// this is to let attemptToDeleteItem check if all the owner's
// dependents are deleted, if so, the owner will be deleted.
gb.attemptToDelete.Add(ownerNode)
}
8 changes: 4 additions & 4 deletions pkg/controller/job/job_controller.go
@@ -120,7 +120,7 @@ type Controller struct {

clock clock.WithTicker

-// Store with information to compute the expotential backoff delay for pod
+// Store with information to compute the exponential backoff delay for pod
// recreation in case of pod failures.
podBackoffStore *backoffStore
}
@@ -1363,10 +1363,10 @@ func (jm *Controller) flushUncountedAndRemoveFinalizers(ctx context.Context, job
}
var rmErr error
if len(podsToRemoveFinalizer) > 0 {
-var rmSucceded []bool
-rmSucceded, rmErr = jm.removeTrackingFinalizerFromPods(ctx, jobKey, podsToRemoveFinalizer)
+var rmSucceeded []bool
+rmSucceeded, rmErr = jm.removeTrackingFinalizerFromPods(ctx, jobKey, podsToRemoveFinalizer)
for i, p := range podsToRemoveFinalizer {
-if rmSucceded[i] {
+if rmSucceeded[i] {
uidsWithFinalizer.Delete(string(p.UID))
}
}
2 changes: 1 addition & 1 deletion pkg/controller/job/job_controller_test.go
@@ -1449,7 +1449,7 @@ func checkIndexedJobPods(t *testing.T, control *controller.FakePodControl, wantI
}
}

-func TestGetNewFinshedPods(t *testing.T) {
+func TestGetNewFinishedPods(t *testing.T) {
cases := map[string]struct {
job batch.Job
pods []*v1.Pod
6 changes: 3 additions & 3 deletions pkg/controller/job/pod_failure_policy_test.go
@@ -91,7 +91,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
podFailurePolicy: &batch.PodFailurePolicy{
Rules: []batch.PodFailurePolicyRule{
{
Action: "UnkonwnAction",
Action: "UnknownAction",
OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{
{
Type: v1.DisruptionTarget,
@@ -284,7 +284,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
},
},
{
Name: "suppport-container",
Name: "support-container",
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
@@ -385,7 +385,7 @@ func TestMatchPodFailurePolicy(t *testing.T) {
},
},
{
Name: "suppport-container",
Name: "support-container",
State: v1.ContainerState{
Terminated: &v1.ContainerStateTerminated{
ExitCode: 0,
2 changes: 1 addition & 1 deletion pkg/controller/namespace/namespace_controller.go
@@ -104,7 +104,7 @@ func NewNamespaceController(
}

// nsControllerRateLimiter is tuned for a faster than normal recycle time with default backoff speed and default overall
-// requeing speed. We do this so that namespace cleanup is reliably faster and we know that the number of namespaces being
+// requeueing speed. We do this so that namespace cleanup is reliably faster and we know that the number of namespaces being
// deleted is smaller than total number of other namespace scoped resources in a cluster.
func nsControllerRateLimiter() workqueue.TypedRateLimiter[string] {
return workqueue.NewTypedMaxOfRateLimiter(
4 changes: 2 additions & 2 deletions pkg/controller/nodeipam/ipam/cidrset/cidr_set_test.go
@@ -370,13 +370,13 @@ func TestDoubleOccupyRelease(t *testing.T) {
operation: "occupy",
numOccupied: 5,
},
-// Occupy an already-coccupied element: no change
+// Occupy an already-occupied element: no change
{
cidrStr: "10.42.9.0/24",
operation: "occupy",
numOccupied: 5,
},
-// Release an coccupied element: -1
+// Release an occupied element: -1
{
cidrStr: "10.42.9.0/24",
operation: "release",
2 changes: 1 addition & 1 deletion pkg/controller/nodeipam/ipam/range_allocator.go
@@ -323,7 +323,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(ctx context.Context, node *v1.Node
allocatedCIDRs[idx] = podCIDR
}

-//queue the assignment
+// queue the assignment
logger.V(4).Info("Putting node with CIDR into the work queue", "node", klog.KObj(node), "CIDRs", allocatedCIDRs)
return r.updateCIDRsAllocation(ctx, node.Name, allocatedCIDRs)
}
6 changes: 3 additions & 3 deletions pkg/controller/nodeipam/ipam/range_allocator_test.go
@@ -566,7 +566,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
-//match
+// match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
@@ -655,7 +655,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
-//match
+// match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR {
t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
@@ -827,7 +827,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
-//match
+// match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR {
t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
4 changes: 2 additions & 2 deletions pkg/controller/nodelifecycle/node_lifecycle_controller.go
@@ -421,7 +421,7 @@ func NewNodeLifecycleController(
nc.nodeLister = nodeInformer.Lister()

if !utilfeature.DefaultFeatureGate.Enabled(features.SeparateTaintEvictionController) {
logger.Info("Running TaintEvictionController as part of NodeLifecyleController")
logger.Info("Running TaintEvictionController as part of NodeLifecycleController")
tm, err := tainteviction.New(ctx, kubeClient, podInformer, nodeInformer, taintEvictionController)
if err != nil {
return nil, err
@@ -595,7 +595,7 @@ func (nc *Controller) doNoScheduleTaintingPass(ctx context.Context, nodeName str
func (nc *Controller) doNoExecuteTaintingPass(ctx context.Context) {
// Extract out the keys of the map in order to not hold
// the evictorLock for the entire function and hold it
-// only when nescessary.
+// only when necessary.
var zoneNoExecuteTainterKeys []string
func() {
nc.evictorLock.Lock()