Fix lint issues reported by gocritic
alexandear committed May 14, 2024
1 parent 5f455b0 commit 6833799
Showing 22 changed files with 59 additions and 72 deletions.
7 changes: 1 addition & 6 deletions .golangci.yaml
@@ -6,14 +6,9 @@ linters-settings:
gocritic:
enabled-checks:
- dupImport
disabled-checks: # temporarily disabled checks, will fix them later
disabled-checks:
- appendAssign
- assignOp
- captLocal
- commentFormatting
- elseif
- exitAfterDefer
- ifElseChain
goimports:
local-prefixes: sigs.k8s.io/kueue
govet:
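Six checks leave the temporarily-disabled list here (assignOp, captLocal, commentFormatting, elseif, exitAfterDefer, ifElseChain; only appendAssign stays disabled), and the rest of the commit rewrites the code they flag. A minimal sketch of the fixed form of each pattern (illustrative only, not code from the repository):

```go
package main

import "fmt"

// captLocal: parameters must not start with a capital letter
// (was: func describe(N int) string).
func describe(n int) string {
	// assignOp: prefer the compound form (was: n = n * 2).
	n *= 2

	// elseif: an else block whose only statement is an if collapses
	// into else-if (was: } else { if n > 4 { ... } }).
	if n > 10 {
		return "big"
	} else if n > 4 {
		return "medium"
	}

	// ifElseChain: three or more if/else-if branches become a switch.
	switch {
	case n < 0:
		return "negative"
	case n == 0:
		return "zero"
	default:
		return "small"
	}
}

// commentFormatting: a space must follow // (was: //like this).
// exitAfterDefer: os.Exit after a defer skips the deferred call, so
// gocritic flags it; no such fix appears in the hunks shown below.

func main() {
	fmt.Println(describe(3)) // medium
}
```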
6 changes: 2 additions & 4 deletions cmd/importer/pod/check.go
@@ -63,10 +63,8 @@ func Check(ctx context.Context, c client.Client, cache *util.ImportCache, jobs u
var pv int32
if pc, found := cache.PriorityClasses[p.Spec.PriorityClassName]; found {
pv = pc.Value
} else {
if p.Spec.PriorityClassName != "" {
return false, fmt.Errorf("%q: %w", p.Spec.PriorityClassName, util.ErrPCNotFound)
}
} else if p.Spec.PriorityClassName != "" {
return false, fmt.Errorf("%q: %w", p.Spec.PriorityClassName, util.ErrPCNotFound)
}

log.V(2).Info("Successfully checked", "clusterQueue", klog.KObj(cq), "resourceFalvor", klog.KObj(rf), "priority", pv)
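The rewrite above is behavior-preserving: in Go, an else block whose only statement is an if is exactly equivalent to else-if. A self-contained sketch of the collapsed lookup (the names are illustrative, not Kueue's actual API):

```go
package main

import (
	"errors"
	"fmt"
)

var errPCNotFound = errors.New("priority class not found")

// priorityValue resolves a priority class name the way the fixed Check does:
// a hit returns the value, a miss with a non-empty name is an error, and an
// empty name falls through to the zero default.
func priorityValue(classes map[string]int32, name string) (int32, error) {
	if pc, found := classes[name]; found {
		return pc, nil
	} else if name != "" { // was: } else { if name != "" { ... } }
		return 0, fmt.Errorf("%q: %w", name, errPCNotFound)
	}
	return 0, nil
}

func main() {
	fmt.Println(priorityValue(map[string]int32{"high": 100}, "high"))
	fmt.Println(priorityValue(nil, "missing"))
}
```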
2 changes: 1 addition & 1 deletion cmd/importer/pod/import.go
@@ -85,7 +85,7 @@ func Import(ctx context.Context, c client.Client, cache *util.ImportCache, jobs
return false, fmt.Errorf("creating workload: %w", err)
}

//make its admission and update its status
// make its admission and update its status
info := workload.NewInfo(wl)
cq := cache.ClusterQueues[string(lq.Spec.ClusterQueue)]
admission := kueue.Admission{
7 changes: 4 additions & 3 deletions pkg/config/validation.go
@@ -171,11 +171,12 @@ func validateIntegrations(c *configapi.Configuration, scheme *runtime.Scheme) fi
}
for idx, framework := range c.Integrations.ExternalFrameworks {
gvk, _ := schema.ParseKindArg(framework)
if gvk == nil {
switch {
case gvk == nil:
allErrs = append(allErrs, field.Invalid(integrationsExternalFrameworkPath.Index(idx), framework, "must be format, 'Kind.version.group.com'"))
} else if managedFrameworks.Has(gvk.String()) {
case managedFrameworks.Has(gvk.String()):
allErrs = append(allErrs, field.Duplicate(integrationsExternalFrameworkPath.Index(idx), framework))
} else {
default:
managedFrameworks = managedFrameworks.Insert(gvk.String())
}
}
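gocritic's ifElseChain check asks for a tagless switch once a chain reaches three branches, since the cases then read as parallel conditions. A sketch with the same shape as the validateIntegrations loop (the validation logic itself is made up):

```go
package main

import "fmt"

// classify mirrors the invalid / duplicate / insert triage from the diff.
func classify(seen map[string]bool, key string) string {
	switch {
	case key == "":
		return "invalid"
	case seen[key]:
		return "duplicate"
	default:
		seen[key] = true
		return "added"
	}
}

func main() {
	seen := map[string]bool{}
	for _, k := range []string{"a", "a", ""} {
		fmt.Printf("%q -> %s\n", k, classify(seen, k))
	}
}
```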
6 changes: 2 additions & 4 deletions pkg/controller/admissionchecks/multikueue/admissioncheck.go
@@ -97,10 +97,8 @@ func (a *ACReconciler) Reconcile(ctx context.Context, req reconcile.Request) (re

if err != nil {
missingClusters = append(missingClusters, clusterName)
} else {
if !apimeta.IsStatusConditionTrue(cluster.Status.Conditions, kueuealpha.MultiKueueClusterActive) {
inactiveClusters = append(inactiveClusters, clusterName)
}
} else if !apimeta.IsStatusConditionTrue(cluster.Status.Conditions, kueuealpha.MultiKueueClusterActive) {
inactiveClusters = append(inactiveClusters, clusterName)
}
}
unusableClustersCount := len(missingClusters) + len(inactiveClusters)
@@ -219,7 +219,7 @@ func (rc *remoteClient) startWatcher(ctx context.Context, kind string, w multiKu
// If the context is not yet Done , queue a reconcile to attempt reconnection
if ctx.Err() == nil {
oldReconnect := rc.pendingReconnect.Swap(true)
//reconnect if this is the first watch failing.
// reconnect if this is the first watch failing.
if !oldReconnect {
log.V(2).Info("Queue reconcile for reconnect", "cluster", rc.clusterName)
rc.queueWatchEndedEvent(ctx)
@@ -239,10 +239,8 @@ func (rc *remoteClient) queueWorkloadEvent(ctx context.Context, wlKey types.Name
localWl := &kueue.Workload{}
if err := rc.localClient.Get(ctx, wlKey, localWl); err == nil {
rc.wlUpdateCh <- event.GenericEvent{Object: localWl}
} else {
if !apierrors.IsNotFound(err) {
ctrl.LoggerFrom(ctx).Error(err, "reading local workload")
}
} else if !apierrors.IsNotFound(err) {
ctrl.LoggerFrom(ctx).Error(err, "reading local workload")
}
}

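Beyond the comment-spacing and else-if fixes, the watcher hunk shows the reconnect guard these lines sit in: pendingReconnect.Swap(true) returns the previous value, so only the first failing watch queues a reconcile. A runnable sketch of that idiom (the surrounding wiring is invented for illustration):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var pendingReconnect atomic.Bool
	for i := 1; i <= 3; i++ {
		// Swap sets the flag and reports whether it was already set:
		// only the first failure wins the race and queues the event.
		if old := pendingReconnect.Swap(true); !old {
			fmt.Println("watch", i, "failed first: queue reconcile for reconnect")
		} else {
			fmt.Println("watch", i, "failed: reconnect already pending")
		}
	}
}
```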
4 changes: 2 additions & 2 deletions pkg/controller/admissionchecks/multikueue/workload_test.go
@@ -467,7 +467,7 @@ func TestWlReconcile(t *testing.T) {
AdmissionCheck(kueue.AdmissionCheckState{
Name: "ac1",
State: kueue.CheckStateReady,
LastTransitionTime: metav1.NewTime(time.Now().Add(-defaultWorkerLostTimeout / 2)), //50% of the timeout
LastTransitionTime: metav1.NewTime(time.Now().Add(-defaultWorkerLostTimeout / 2)), // 50% of the timeout
Message: `The workload got reservation on "worker1"`,
}).
ControllerReference(batchv1.SchemeGroupVersion.WithKind("Job"), "job1", "uid1").
@@ -605,7 +605,7 @@ func TestWlReconcile(t *testing.T) {
*baseWorkloadBuilder.Clone().
Label(kueuealpha.MultiKueueOriginLabel, defaultOrigin).
ReserveQuota(utiltesting.MakeAdmission("q1").Obj()).
QuotaReservedTime(time.Now().Add(-time.Minute)). //one minute ago
QuotaReservedTime(time.Now().Add(-time.Minute)). // one minute ago
Obj(),
},
worker2Jobs: []batchv1.Job{
6 changes: 3 additions & 3 deletions pkg/controller/admissionchecks/provisioning/controller.go
@@ -119,7 +119,7 @@ func (c *Controller) Reconcile(ctx context.Context, req reconcile.Request) (reco
}

if !workload.HasQuotaReservation(wl) || apimeta.IsStatusConditionTrue(wl.Status.Conditions, kueue.WorkloadFinished) {
//1.2 workload has no reservation or is finished
// 1.2 workload has no reservation or is finished
log.V(5).Info("workload with no reservation, delete owned requests")
return reconcile.Result{}, c.deleteOwnedProvisionRequests(ctx, req.Namespace, req.Name)
}
@@ -222,7 +222,7 @@ func (c *Controller) syncOwnedProvisionRequest(ctx context.Context, wl *kueue.Wo
log := ctrl.LoggerFrom(ctx)
var requeAfter *time.Duration
for _, checkName := range relevantChecks {
//get the config
// get the config
prc, err := c.helper.ConfigForAdmissionCheck(ctx, checkName)
if err != nil {
// the check is not active
@@ -819,7 +819,7 @@ func remainingTime(prc *kueue.ProvisioningRequestConfig, failuresCount int32, la
maxBackoff := 30 * time.Minute
backoffDuration := defaultBackoff
for i := 1; i < int(failuresCount); i++ {
backoffDuration = backoffDuration * 2
backoffDuration *= 2
if backoffDuration >= maxBackoff {
backoffDuration = maxBackoff
break
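The one non-comment change in this file is an assignOp fix: backoffDuration = backoffDuration * 2 becomes backoffDuration *= 2. The loop it sits in doubles the backoff per failure up to a 30-minute cap; a runnable sketch, with the one-minute base an assumption since the hunk only shows the cap:

```go
package main

import (
	"fmt"
	"time"
)

// backoff doubles the delay per recorded failure and caps it, like
// remainingTime in the diff. The base duration is assumed, not shown there.
func backoff(failuresCount int32) time.Duration {
	const defaultBackoff = 1 * time.Minute // assumed base value
	const maxBackoff = 30 * time.Minute

	backoffDuration := defaultBackoff
	for i := 1; i < int(failuresCount); i++ {
		backoffDuration *= 2 // was: backoffDuration = backoffDuration * 2
		if backoffDuration >= maxBackoff {
			return maxBackoff
		}
	}
	return backoffDuration
}

func main() {
	for _, f := range []int32{1, 3, 6} {
		fmt.Printf("%d failure(s) -> %v\n", f, backoff(f))
	}
}
```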
2 changes: 1 addition & 1 deletion pkg/controller/core/workload_controller_test.go
@@ -600,7 +600,7 @@ func TestReconcile(t *testing.T) {
Message: "The workload is deactivated",
}).
// The fake test not allow to save state with nil values when updating by Patch/Apply. So we are skipping this case.
//RequeueState(ptr.To[int32](4), ptr.To(metav1.NewTime(testStartTime.Truncate(time.Second)))).
// RequeueState(ptr.To[int32](4), ptr.To(metav1.NewTime(testStartTime.Truncate(time.Second)))).
Obj(),
wantWorkload: utiltesting.MakeWorkload("wl", "ns").
Active(true).
6 changes: 2 additions & 4 deletions pkg/controller/jobs/job/job_webhook.go
@@ -98,10 +98,8 @@ func (w *JobWebhook) validatePartialAdmissionCreate(job *Job) field.ErrorList {
v, err := strconv.Atoi(strVal)
if err != nil {
allErrs = append(allErrs, field.Invalid(minPodsCountAnnotationsPath, job.Annotations[JobMinParallelismAnnotation], err.Error()))
} else {
if int32(v) >= job.podsCount() || v <= 0 {
allErrs = append(allErrs, field.Invalid(minPodsCountAnnotationsPath, v, fmt.Sprintf("should be between 0 and %d", job.podsCount()-1)))
}
} else if int32(v) >= job.podsCount() || v <= 0 {
allErrs = append(allErrs, field.Invalid(minPodsCountAnnotationsPath, v, fmt.Sprintf("should be between 0 and %d", job.podsCount()-1)))
}
}
if strVal, found := job.Annotations[JobCompletionsEqualParallelismAnnotation]; found {
6 changes: 3 additions & 3 deletions pkg/controller/jobs/pod/expectations.go
@@ -41,9 +41,9 @@ func newUIDExpectations(name string) *expectationsStore {
}
}

func (e *expectationsStore) ExpectUIDs(log logr.Logger, key types.NamespacedName, UIDs []types.UID) {
log.V(3).Info("Expecting UIDs", "store", e.name, "key", key, "uids", UIDs)
expectedUIDs := sets.New[types.UID](UIDs...)
func (e *expectationsStore) ExpectUIDs(log logr.Logger, key types.NamespacedName, uids []types.UID) {
log.V(3).Info("Expecting UIDs", "store", e.name, "key", key, "uids", uids)
expectedUIDs := sets.New[types.UID](uids...)
e.Lock()
defer e.Unlock()

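captLocal is purely about readability: a parameter spelled UIDs reads like an exported package-level identifier inside the function body, so it renames to uids. A tiny sketch of the same rename with an invented signature (the real store uses k8s.io sets and a mutex, as the hunk shows):

```go
package main

import "fmt"

// was: func expectUIDs(UIDs []string). captLocal flags the capitalized
// parameter; the body is unchanged by the rename.
func expectUIDs(uids []string) map[string]struct{} {
	expected := make(map[string]struct{}, len(uids))
	for _, uid := range uids {
		expected[uid] = struct{}{}
	}
	return expected
}

func main() {
	fmt.Println(len(expectUIDs([]string{"a", "b", "b"}))) // 2
}
```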
2 changes: 1 addition & 1 deletion pkg/queue/manager_test.go
@@ -234,7 +234,7 @@ func TestClusterQueueToActive(t *testing.T) {
case <-condRec:
gotCondBeforeCleanup = true
case <-time.After(100 * time.Millisecond):
//nothing
// nothing
}

counterCancel()
14 changes: 6 additions & 8 deletions pkg/scheduler/flavorassigner/flavorassigner.go
@@ -465,14 +465,12 @@ func (a *FlavorAssigner) findFlavorForPodSetResource(
bestAssignment = assignments
bestAssignmentMode = representativeMode
}
} else {
if representativeMode > bestAssignmentMode {
bestAssignment = assignments
bestAssignmentMode = representativeMode
if bestAssignmentMode == Fit {
// All the resources fit in the cohort, no need to check more flavors.
return bestAssignment, nil
}
} else if representativeMode > bestAssignmentMode {
bestAssignment = assignments
bestAssignmentMode = representativeMode
if bestAssignmentMode == Fit {
// All the resources fit in the cohort, no need to check more flavors.
return bestAssignment, nil
}
}
}
14 changes: 8 additions & 6 deletions pkg/scheduler/scheduler_test.go
@@ -1748,11 +1748,12 @@ func TestSchedule(t *testing.T) {
for cqName, c := range snapshot.ClusterQueues {
for name, w := range c.Workloads {
gotWorkloads = append(gotWorkloads, *w.Obj)
if !workload.HasQuotaReservation(w.Obj) {
switch {
case !workload.HasQuotaReservation(w.Obj):
t.Errorf("Workload %s is not admitted by a clusterQueue, but it is found as member of clusterQueue %s in the cache", name, cqName)
} else if string(w.Obj.Status.Admission.ClusterQueue) != cqName {
case string(w.Obj.Status.Admission.ClusterQueue) != cqName:
t.Errorf("Workload %s is admitted by clusterQueue %s, but it is found as member of clusterQueue %s in the cache", name, w.Obj.Status.Admission.ClusterQueue, cqName)
} else {
default:
gotAssignments[name] = *w.Obj.Status.Admission
}
}
@@ -2344,11 +2345,12 @@ func TestLastSchedulingContext(t *testing.T) {
snapshot := cqCache.Snapshot()
for cqName, c := range snapshot.ClusterQueues {
for name, w := range c.Workloads {
if !workload.IsAdmitted(w.Obj) {
switch {
case !workload.IsAdmitted(w.Obj):
t.Errorf("Workload %s is not admitted by a clusterQueue, but it is found as member of clusterQueue %s in the cache", name, cqName)
} else if string(w.Obj.Status.Admission.ClusterQueue) != cqName {
case string(w.Obj.Status.Admission.ClusterQueue) != cqName:
t.Errorf("Workload %s is admitted by clusterQueue %s, but it is found as member of clusterQueue %s in the cache", name, w.Obj.Status.Admission.ClusterQueue, cqName)
} else {
default:
gotAssignments[name] = *w.Obj.Status.Admission
}
}
8 changes: 4 additions & 4 deletions pkg/util/admissioncheck/admissioncheck.go
@@ -116,11 +116,11 @@ func refValidForGK(ref *kueue.AdmissionCheckParametersReference, gk schema.Group
return true, nil
}

func IndexerByConfigFunction(ControllerName string, gvk schema.GroupVersionKind) client.IndexerFunc {
func IndexerByConfigFunction(controllerName string, gvk schema.GroupVersionKind) client.IndexerFunc {
gk := gvk.GroupKind()
return func(obj client.Object) []string {
ac, isAc := obj.(*kueue.AdmissionCheck)
if !isAc || ac == nil || ac.Spec.ControllerName != ControllerName {
if !isAc || ac == nil || ac.Spec.ControllerName != controllerName {
return nil
}
if isvalid, _ := refValidForGK(ac.Spec.Parameters, gk); !isvalid {
@@ -131,14 +131,14 @@ func IndexerByConfigFunction(ControllerName string, gvk schema.GroupVersionKind)
}

// FilterForController - returns a list of check names controlled by ControllerName.
func FilterForController(ctx context.Context, c client.Client, states []kueue.AdmissionCheckState, ControllerName string) ([]string, error) {
func FilterForController(ctx context.Context, c client.Client, states []kueue.AdmissionCheckState, controllerName string) ([]string, error) {
var retActive []string
for _, state := range states {
ac := &kueue.AdmissionCheck{}

if err := c.Get(ctx, types.NamespacedName{Name: state.Name}, ac); client.IgnoreNotFound(err) != nil {
return nil, err
} else if err == nil && ac.Spec.ControllerName == ControllerName {
} else if err == nil && ac.Spec.ControllerName == controllerName {
retActive = append(retActive, ac.Name)
}
}
6 changes: 2 additions & 4 deletions pkg/util/resource/resource.go
@@ -32,10 +32,8 @@ func mergeResourceList(a, b corev1.ResourceList, f resolveConflict) corev1.Resou
for k, vb := range b {
if va, exists := ret[k]; !exists {
ret[k] = vb.DeepCopy()
} else {
if f != nil {
ret[k] = f(va, vb)
}
} else if f != nil {
ret[k] = f(va, vb)
}
}
return ret
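The same else-if collapse, this time around mergeResourceList's conflict-resolver callback: a key present in both lists goes through f only when a resolver was supplied. A self-contained sketch with plain maps standing in for corev1.ResourceList:

```go
package main

import "fmt"

type resolveConflict func(a, b int) int

// merge copies a, then folds in b: new keys are taken as-is, and keys
// present in both sides go through f when one is provided.
func merge(a, b map[string]int, f resolveConflict) map[string]int {
	ret := make(map[string]int, len(a))
	for k, v := range a {
		ret[k] = v
	}
	for k, vb := range b {
		if va, exists := ret[k]; !exists {
			ret[k] = vb
		} else if f != nil { // was: } else { if f != nil { ... } }
			ret[k] = f(va, vb)
		}
	}
	return ret
}

func main() {
	maxOf := func(a, b int) int {
		if a > b {
			return a
		}
		return b
	}
	fmt.Println(merge(map[string]int{"cpu": 2}, map[string]int{"cpu": 3, "mem": 1}, maxOf))
}
```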
4 changes: 2 additions & 2 deletions pkg/util/testing/client.go
@@ -140,6 +140,6 @@ func wrapSSAPatch(patch client.Patch) client.Patch {
// TreatSSAAsStrategicMerge - can be used as a SubResourcePatch interceptor function to treat SSA patches as StrategicMergePatchType.
// Note: By doing so the values set in the patch will be updated but the call will have no knowledge of FieldManagement when it
// comes to detecting conflicts between managers or removing fields that are missing from the patch.
func TreatSSAAsStrategicMerge(ctx context.Context, clnt client.Client, SubResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error {
return clnt.SubResource(SubResourceName).Patch(ctx, obj, wrapSSAPatch(patch), opts...)
func TreatSSAAsStrategicMerge(ctx context.Context, clnt client.Client, subResourceName string, obj client.Object, patch client.Patch, opts ...client.SubResourcePatchOption) error {
return clnt.SubResource(subResourceName).Patch(ctx, obj, wrapSSAPatch(patch), opts...)
}
6 changes: 3 additions & 3 deletions pkg/util/testing/wrappers.go
@@ -866,7 +866,7 @@ func (lr *LimitRangeWrapper) WithValue(member string, t corev1.ResourceName, q s
case "Default":
target = lr.Spec.Limits[0].Default
case "Max":
//nothing
// nothing
default:
panic("Unexpected member " + member)
}
@@ -1052,10 +1052,10 @@ func (mkc *MultiKueueClusterWrapper) Obj() *kueuealpha.MultiKueueCluster {
return &mkc.MultiKueueCluster
}

func (mkc *MultiKueueClusterWrapper) KubeConfig(LocationType kueuealpha.LocationType, location string) *MultiKueueClusterWrapper {
func (mkc *MultiKueueClusterWrapper) KubeConfig(locationType kueuealpha.LocationType, location string) *MultiKueueClusterWrapper {
mkc.Spec.KubeConfig = kueuealpha.KubeConfig{
Location: location,
LocationType: LocationType,
LocationType: locationType,
}
return mkc
}
7 changes: 4 additions & 3 deletions pkg/visibility/api/rest/pending_workloads_cq_test.go
@@ -340,13 +340,14 @@ func TestPendingWorkloadsInCQ(t *testing.T) {
}

info, err := pendingWorkloadsInCqRest.Get(ctx, tc.req.queueName, tc.req.queryParams)
if tc.wantErrMatch != nil {
switch {
case tc.wantErrMatch != nil:
if !tc.wantErrMatch(err) {
t.Errorf("Error differs: (-want,+got):\n%s", cmp.Diff(tc.wantResp.wantErr.Error(), err.Error()))
}
} else if err != nil {
case err != nil:
t.Error(err)
} else {
default:
pendingWorkloadsInfo := info.(*visibility.PendingWorkloadsSummary)
if diff := cmp.Diff(tc.wantResp.wantPendingWorkloads, pendingWorkloadsInfo.Items, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("Pending workloads differ: (-want,+got):\n%s", diff)
7 changes: 4 additions & 3 deletions pkg/visibility/api/rest/pending_workloads_lq_test.go
@@ -457,13 +457,14 @@ func TestPendingWorkloadsInLQ(t *testing.T) {

ctx = request.WithNamespace(ctx, tc.req.nsName)
info, err := pendingWorkloadsInLqRest.Get(ctx, tc.req.queueName, tc.req.queryParams)
if tc.wantErrMatch != nil {
switch {
case tc.wantErrMatch != nil:
if !tc.wantErrMatch(err) {
t.Errorf("Error differs: (-want,+got):\n%s", cmp.Diff(tc.wantResp.wantErr.Error(), err.Error()))
}
} else if err != nil {
case err != nil:
t.Error(err)
} else {
default:
pendingWorkloadsInfo := info.(*visibility.PendingWorkloadsSummary)
if diff := cmp.Diff(tc.wantResp.wantPendingWorkloads, pendingWorkloadsInfo.Items, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("Pending workloads differ: (-want,+got):\n%s", diff)
2 changes: 1 addition & 1 deletion pkg/workload/workload.go
@@ -437,7 +437,7 @@ func SetQuotaReservation(w *kueue.Workload, admission *kueue.Admission) {
}
apimeta.SetStatusCondition(&w.Status.Conditions, admittedCond)

//reset Evicted condition if present.
// reset Evicted condition if present.
if evictedCond := apimeta.FindStatusCondition(w.Status.Conditions, kueue.WorkloadEvicted); evictedCond != nil {
evictedCond.Status = metav1.ConditionFalse
evictedCond.Reason = "QuotaReserved"
@@ -152,7 +152,6 @@ var _ = ginkgo.Describe("JobSet controller", ginkgo.Ordered, ginkgo.ContinueOnFa
return k8sClient.Get(ctx, key, wl)
}, util.Timeout, util.Interval).Should(testing.BeNotFoundError())
// check the original wl is still there
//gomega.Expect(k8sClient.Get(ctx, wlLookupKey, createdWorkload)).Should(gomega.Succeed())
gomega.Eventually(func() error {
return k8sClient.Get(ctx, wlLookupKey, createdWorkload)
}, util.Timeout, util.Interval).Should(gomega.Succeed())