
Commit

linting
NathanBaulch committed Sep 4, 2024
1 parent b318e44 commit 432325d
Showing 9 changed files with 25 additions and 25 deletions.
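For context: every hunk below makes the same one-character fix, inserting a space between "//" and the comment text. This is standard Go comment style; a comment-formatting check such as gocritic's commentFormatting (an assumption, since the commit message does not name the linter) typically flags the old form. A minimal sketch with a hypothetical function, contrasting the two forms:

package main

import "fmt"

// demo returns a fixed value; the two comments inside contrast the styles.
func demo() int {
	//before: no space after the slashes, the form this commit removes
	// after: a single space after the slashes, the form this commit adds
	return 0
}

func main() {
	fmt.Println(demo())
}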
2 changes: 1 addition & 1 deletion pkg/controller/deployment/util/deployment_util.go
@@ -248,7 +248,7 @@ func SetNewReplicaSetAnnotations(ctx context.Context, deployment *apps.Deploymen
logger.Info("Updating replica set revision OldRevision not int", "err", err)
return false
}
- //If the RS annotation is empty then initialise it to 0
+ // If the RS annotation is empty then initialise it to 0
oldRevisionInt = 0
}
newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64)
14 changes: 7 additions & 7 deletions pkg/controller/deployment/util/deployment_util_test.go
@@ -1023,21 +1023,21 @@ func TestMaxUnavailable(t *testing.T) {
// Set of simple tests for annotation related util functions
func TestAnnotationUtils(t *testing.T) {

- //Setup
+ // Setup
tDeployment := generateDeployment("nginx")
tRS := generateRS(tDeployment)
tDeployment.Annotations[RevisionAnnotation] = "1"

- //Test Case 1: Check if annotations are copied properly from deployment to RS
+ // Test Case 1: Check if annotations are copied properly from deployment to RS
t.Run("SetNewReplicaSetAnnotations", func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)

- //Try to set the increment revision from 11 through 20
+ // Try to set the increment revision from 11 through 20
for i := 10; i < 20; i++ {

nextRevision := fmt.Sprintf("%d", i+1)
SetNewReplicaSetAnnotations(ctx, &tDeployment, &tRS, nextRevision, true, 5)
- //Now the ReplicaSets Revision Annotation should be i+1
+ // Now the ReplicaSets Revision Annotation should be i+1

if i >= 12 {
expectedHistoryAnnotation := fmt.Sprintf("%d,%d", i-1, i)
@@ -1051,7 +1051,7 @@ func TestAnnotationUtils(t *testing.T) {
}
})

- //Test Case 2: Check if annotations are set properly
+ // Test Case 2: Check if annotations are set properly
t.Run("SetReplicasAnnotations", func(t *testing.T) {
updated := SetReplicasAnnotations(&tRS, 10, 11)
if !updated {
@@ -1072,7 +1072,7 @@ func TestAnnotationUtils(t *testing.T) {
}
})

- //Test Case 3: Check if annotations reflect deployments state
+ // Test Case 3: Check if annotations reflect deployments state
tRS.Annotations[DesiredReplicasAnnotation] = "1"
tRS.Status.AvailableReplicas = 1
tRS.Spec.Replicas = new(int32)
@@ -1084,7 +1084,7 @@ func TestAnnotationUtils(t *testing.T) {
t.Errorf("SetReplicasAnnotations Expected=true Obtained=false")
}
})
- //Tear Down
+ // Tear Down
}

func TestReplicasAnnotationsNeedUpdate(t *testing.T) {
2 changes: 1 addition & 1 deletion pkg/controller/nodeipam/ipam/range_allocator.go
@@ -323,7 +323,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(ctx context.Context, node *v1.Node
allocatedCIDRs[idx] = podCIDR
}

- //queue the assignment
+ // queue the assignment
logger.V(4).Info("Putting node with CIDR into the work queue", "node", klog.KObj(node), "CIDRs", allocatedCIDRs)
return r.updateCIDRsAllocation(ctx, node.Name, allocatedCIDRs)
}
6 changes: 3 additions & 3 deletions pkg/controller/nodeipam/ipam/range_allocator_test.go
@@ -566,7 +566,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
- //match
+ // match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR {
t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
@@ -655,7 +655,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
- //match
+ // match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR {
t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
@@ -827,7 +827,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
if len(updatedNode.Spec.PodCIDRs) == 0 {
continue // not assigned yet
}
- //match
+ // match
for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound {
if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR {
t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs)
2 changes: 1 addition & 1 deletion pkg/controller/podautoscaler/replica_calculator_test.go
@@ -1101,7 +1101,7 @@ func TestReplicaCalcScaleDownContainerIgnoresFailedPods(t *testing.T) {
resource: &resourceInfo{
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
- levels: [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}}, //TODO: Test is broken
+ levels: [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}}, // TODO: Test is broken

targetUtilization: 50,
expectedUtilization: 28,
2 changes: 1 addition & 1 deletion pkg/controller/statefulset/stateful_set_control.go
@@ -251,7 +251,7 @@ func (ssc *defaultStatefulSetControl) getStatefulSetRevisions(
return nil, nil, collisionCount, err
}
} else {
- //if there is no equivalent revision we create a new one
+ // if there is no equivalent revision we create a new one
updateRevision, err = ssc.controllerHistory.CreateControllerRevision(set, updateRevision, &collisionCount)
if err != nil {
return nil, nil, collisionCount, err
@@ -90,14 +90,14 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
intreeToCSITranslator: csiTranslator,
}

- //add the given node to the list of nodes managed by dsw
+ // add the given node to the list of nodes managed by dsw
dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName))
logger, _ := ktesting.NewTestContext(t)
dswp.findAndAddActivePods(logger)

expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)

- //check if the given volume referenced by the pod is added to dsw
+ // check if the given volume referenced by the pod is added to dsw
volumeExists := dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
if !volumeExists {
t.Fatalf(
@@ -106,10 +106,10 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
volumeExists)
}

- //delete the pod and volume manually
+ // delete the pod and volume manually
dswp.desiredStateOfWorld.DeletePod(podName, expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))

- //check if the given volume referenced by the pod still exists in dsw
+ // check if the given volume referenced by the pod still exists in dsw
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
if volumeExists {
t.Fatalf(
@@ -118,10 +118,10 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
volumeExists)
}

- //add pod and volume again
+ // add pod and volume again
dswp.findAndAddActivePods(logger)

- //check if the given volume referenced by the pod is added to dsw for the second time
+ // check if the given volume referenced by the pod is added to dsw for the second time
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
if !volumeExists {
t.Fatalf(
@@ -132,7 +132,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {

fakePodInformer.Informer().GetStore().Delete(pod)
dswp.findAndRemoveDeletedPods(logger)
- //check if the given volume referenced by the pod still exists in dsw
+ // check if the given volume referenced by the pod still exists in dsw
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
if volumeExists {
t.Fatalf(
@@ -195,14 +195,14 @@ func TestFindAndRemoveNonattachableVolumes(t *testing.T) {
intreeToCSITranslator: csiTranslator,
}

- //add the given node to the list of nodes managed by dsw
+ // add the given node to the list of nodes managed by dsw
dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName))
logger, _ := ktesting.NewTestContext(t)
dswp.findAndAddActivePods(logger)

expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)

- //check if the given volume referenced by the pod is added to dsw
+ // check if the given volume referenced by the pod is added to dsw
volumeExists := dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
if !volumeExists {
t.Fatalf(
@@ -910,7 +910,7 @@ func Test_Run_OneVolumeDetachOnOutOfServiceTaintedNode(t *testing.T) {
waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)

// Delete the pod and the volume will be detached only after the maxLongWaitForUnmountDuration expires as volume is
- //not unmounted. Here maxLongWaitForUnmountDuration is used to mimic that node is out of service.
+ // not unmounted. Here maxLongWaitForUnmountDuration is used to mimic that node is out of service.
// But in this case the node has the node.kubernetes.io/out-of-service taint and hence it will not wait for
// maxLongWaitForUnmountDuration and will progress to detach immediately.
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
2 changes: 1 addition & 1 deletion pkg/controller/volume/persistentvolume/binder_test.go
@@ -396,7 +396,7 @@ func TestSync(t *testing.T) {
{
// syncClaim with claim pre-bound to a PV that exists and is
// unbound, but its size is smaller than requested.
- //Check that the claim status is reset to Pending
+ // Check that the claim status is reset to Pending
name: "2-9 - claim prebound to unbound volume that size is smaller than requested",
initialVolumes: newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
expectedVolumes: newVolumeArray("volume2-9", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain, classEmpty),
