Bump golangci-lint to v1.62.2 and fix all lint errors
AndiDog committed Dec 17, 2024
1 parent 95b1622 commit 090ab3c
Showing 41 changed files with 167 additions and 151 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/pr-golangci-lint.yaml
@@ -28,6 +28,6 @@ jobs:
- name: golangci-lint
uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # tag=v6.1.1
with:
-version: v1.56.1
+version: v1.62.2
args: --out-format=colored-line-number
working-directory: ${{matrix.working-directory}}
4 changes: 3 additions & 1 deletion .golangci.yml
@@ -6,12 +6,12 @@ linters:
- bidichk
- bodyclose
- containedctx
+- copyloopvar
- dogsled
- dupword
- durationcheck
- errcheck
- errchkjson
-- exportloopref
- gci
- ginkgolinter
- goconst
@@ -23,8 +23,10 @@
- gosec
- gosimple
- govet
+- iface
- importas
- ineffassign
+- intrange
- loggercheck
- misspell
- nakedret
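Note on the linter changes above: `copyloopvar` supersedes `exportloopref`, which golangci-lint deprecated once Go 1.22 started giving each loop iteration its own variable (the now-redundant `m := m` copy removed in `awsmachine_controller.go` below is exactly what it flags). `iface` and `intrange` are newly enabled; `intrange` suggests Go 1.22's range-over-int loop form. A minimal sketch of the rewrite `intrange` asks for (the loop bounds are illustrative):

```go
package main

import "fmt"

func main() {
	// Flagged by intrange: a classic counting loop over 0..n-1...
	for i := 0; i < 3; i++ {
		fmt.Println(i)
	}

	// ...can use Go 1.22's range-over-int form instead.
	for i := range 3 {
		fmt.Println(i)
	}
}
```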
2 changes: 1 addition & 1 deletion bootstrap/eks/controllers/eksconfig_controller.go
@@ -231,7 +231,7 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
files, err := r.resolveFiles(ctx, config)
if err != nil {
log.Info("Failed to resolve files for user data")
-conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, err.Error())
+conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
return err
}

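The `err.Error()` → `"%s", err.Error()` change above is the fix that recurs throughout this commit: newer govet/printf checks flag non-constant format strings passed to printf-style helpers such as `conditions.MarkFalse`, because any stray `%` in the error text would be parsed as a formatting verb. A minimal sketch of the failure mode, with a hypothetical `markFalse` standing in for the printf-style condition helper:

```go
package main

import (
	"errors"
	"fmt"
)

// markFalse is a hypothetical stand-in for a printf-style API such as
// conditions.MarkFalse(obj, condition, reason, severity, format, args...).
func markFalse(format string, args ...interface{}) {
	fmt.Printf(format+"\n", args...)
}

func main() {
	err := errors.New("volume at 90% capacity") // note the stray '%'

	// Unsafe: the error text becomes the format string, so "% c" is
	// misparsed as a verb and the message comes out garbled.
	markFalse(err.Error())

	// Safe: constant format string; the error text is a plain argument.
	markFalse("%s", err.Error())
}
```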
58 changes: 29 additions & 29 deletions bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go
@@ -54,16 +54,16 @@ func TestEKSConfigReconciler(t *testing.T) {
reconciler := EKSConfigReconciler{
Client: testEnv.Client,
}
-t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
+t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)
g.Eventually(func(gomega Gomega) {
err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine"))
gomega.Expect(err).NotTo(HaveOccurred())
}).Should(Succeed())

-t.Logf(fmt.Sprintf("Secret '%s' should exist and be correct", config.Name))
+t.Logf("Secret '%s' should exist and be correct", config.Name)
secretList := &corev1.SecretList{}
testEnv.Client.List(ctx, secretList)
-t.Logf(dump("secrets", secretList))
+t.Log(dump("secrets", secretList))
secret := &corev1.Secret{}
g.Eventually(func(gomega Gomega) {
gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{
@@ -91,10 +91,10 @@ func TestEKSConfigReconciler(t *testing.T) {
},
}
config.Status.DataSecretName = &mp.Name
-t.Logf(dump("amcp", amcp))
-t.Logf(dump("config", config))
-t.Logf(dump("machinepool", mp))
-t.Logf(dump("cluster", cluster))
+t.Log(dump("amcp", amcp))
+t.Log(dump("config", config))
+t.Log(dump("machinepool", mp))
+t.Log(dump("cluster", cluster))
oldUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "test-value"})
g.Expect(err).To(BeNil())
expectedUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "updated-test-value"})
@@ -103,21 +103,21 @@ func TestEKSConfigReconciler(t *testing.T) {

amcpList := &ekscontrolplanev1.AWSManagedControlPlaneList{}
testEnv.Client.List(ctx, amcpList)
-t.Logf(dump("stored-amcps", amcpList))
+t.Log(dump("stored-amcps", amcpList))

reconciler := EKSConfigReconciler{
Client: testEnv.Client,
}
-t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
+t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)
g.Eventually(func(gomega Gomega) {
err := reconciler.joinWorker(ctx, cluster, config, configOwner("MachinePool"))
gomega.Expect(err).NotTo(HaveOccurred())
}).Should(Succeed())

-t.Logf(fmt.Sprintf("Secret '%s' should exist and be correct", config.Name))
+t.Logf("Secret '%s' should exist and be correct", config.Name)
secretList := &corev1.SecretList{}
testEnv.Client.List(ctx, secretList)
-t.Logf(dump("secrets", secretList))
+t.Log(dump("secrets", secretList))

secret := &corev1.Secret{}
g.Eventually(func(gomega Gomega) {
@@ -132,15 +132,15 @@ func TestEKSConfigReconciler(t *testing.T) {
config.Spec.KubeletExtraArgs = map[string]string{
"test-arg": "updated-test-value",
}
-t.Logf(dump("config", config))
+t.Log(dump("config", config))
g.Eventually(func(gomega Gomega) {
err := reconciler.joinWorker(ctx, cluster, config, configOwner("MachinePool"))
gomega.Expect(err).NotTo(HaveOccurred())
}).Should(Succeed())
-t.Logf(fmt.Sprintf("Secret '%s' should exist and be up to date", config.Name))
+t.Logf("Secret '%s' should exist and be up to date", config.Name)

testEnv.Client.List(ctx, secretList)
-t.Logf(dump("secrets", secretList))
+t.Log(dump("secrets", secretList))
g.Eventually(func(gomega Gomega) {
gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{
Name: config.Name,
@@ -156,10 +156,10 @@ func TestEKSConfigReconciler(t *testing.T) {
cluster := newCluster(amcp.Name)
machine := newMachine(cluster, "test-machine")
config := newEKSConfig(machine)
-t.Logf(dump("amcp", amcp))
-t.Logf(dump("config", config))
-t.Logf(dump("machine", machine))
-t.Logf(dump("cluster", cluster))
+t.Log(dump("amcp", amcp))
+t.Log(dump("config", config))
+t.Log(dump("machine", machine))
+t.Log(dump("cluster", cluster))
expectedUserData, err := newUserData(cluster.Name, map[string]string{"test-arg": "test-value"})
g.Expect(err).To(BeNil())
g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed())
@@ -174,21 +174,21 @@ func TestEKSConfigReconciler(t *testing.T) {

amcpList := &ekscontrolplanev1.AWSManagedControlPlaneList{}
testEnv.Client.List(ctx, amcpList)
-t.Logf(dump("stored-amcps", amcpList))
+t.Log(dump("stored-amcps", amcpList))

reconciler := EKSConfigReconciler{
Client: testEnv.Client,
}
-t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
+t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)
g.Eventually(func(gomega Gomega) {
err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine"))
gomega.Expect(err).NotTo(HaveOccurred())
}).Should(Succeed())

-t.Logf(fmt.Sprintf("Secret '%s' should exist and be out of date", config.Name))
+t.Logf("Secret '%s' should exist and be out of date", config.Name)
secretList := &corev1.SecretList{}
testEnv.Client.List(ctx, secretList)
-t.Logf(dump("secrets", secretList))
+t.Log(dump("secrets", secretList))

secret = &corev1.Secret{}
g.Eventually(func(gomega Gomega) {
@@ -226,11 +226,11 @@ func TestEKSConfigReconciler(t *testing.T) {
"secretKey": []byte(secretContent),
},
}
-t.Logf(dump("amcp", amcp))
-t.Logf(dump("config", config))
-t.Logf(dump("machine", machine))
-t.Logf(dump("cluster", cluster))
-t.Logf(dump("secret", secret))
+t.Log(dump("amcp", amcp))
+t.Log(dump("config", config))
+t.Log(dump("machine", machine))
+t.Log(dump("cluster", cluster))
+t.Log(dump("secret", secret))
g.Expect(testEnv.Client.Create(ctx, secret)).To(Succeed())
g.Expect(testEnv.Client.Create(ctx, amcp)).To(Succeed())

@@ -252,15 +252,15 @@ func TestEKSConfigReconciler(t *testing.T) {
reconciler := EKSConfigReconciler{
Client: testEnv.Client,
}
-t.Logf(fmt.Sprintf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name))
+t.Logf("Calling reconcile on cluster '%s' and config '%s' should requeue", cluster.Name, config.Name)
g.Eventually(func(gomega Gomega) {
err := reconciler.joinWorker(ctx, cluster, config, configOwner("Machine"))
gomega.Expect(err).NotTo(HaveOccurred())
}).Should(Succeed())

secretList := &corev1.SecretList{}
testEnv.Client.List(ctx, secretList)
-t.Logf(dump("secrets", secretList))
+t.Log(dump("secrets", secretList))
gotSecret := &corev1.Secret{}
g.Eventually(func(gomega Gomega) {
gomega.Expect(testEnv.Client.Get(ctx, client.ObjectKey{
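Two related test-logging fixes run through the file above: `t.Logf(fmt.Sprintf(...))` formats twice for no reason (and once those calls are gone, the `fmt` import becomes unused), while `t.Logf(dump(...))` feeds a pre-built string in as a format string, where any `%` in the dumped object would be misparsed. A minimal sketch of the forms involved, with `dump` as a stand-in for the test suite's helper of the same name:

```go
package logdemo

import (
	"fmt"
	"testing"
)

// dump is a stand-in for the test suite's helper that renders an object.
func dump(name string, obj interface{}) string {
	return fmt.Sprintf("%s: %+v", name, obj)
}

func TestLogging(t *testing.T) {
	name := "my-config"

	// Redundant double formatting; golangci-lint flags this.
	t.Logf(fmt.Sprintf("Secret '%s' should exist", name))
	// Idiomatic: let Logf do the formatting.
	t.Logf("Secret '%s' should exist", name)

	// Unsafe: dump's output is used as a format string.
	t.Logf(dump("config", map[string]int{"replicas": 3}))
	// Safe: Log prints the string verbatim.
	t.Log(dump("config", map[string]int{"replicas": 3}))
}
```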
1 change: 1 addition & 0 deletions cmd/clusterawsadm/ami/helper.go
@@ -67,6 +67,7 @@ func LatestPatchRelease(searchVersion string) (string, error) {
if err != nil {
return "", err
}
+//#nosec G115
resp, err := http.Get(fmt.Sprintf(latestStableReleaseURL, "-"+strconv.Itoa(int(searchSemVer.Major))+"."+strconv.Itoa(int(searchSemVer.Minor))))
if err != nil {
return "", err
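The `//#nosec G115` line added above suppresses gosec's G115 check (potential integer overflow in an integer conversion): the parsed semver `Major`/`Minor` fields are unsigned 64-bit values, and narrowing them to `int` for `strconv.Itoa` could overflow in theory, though never for a real Kubernetes version, so the finding is silenced in place rather than guarded. A minimal sketch of the pattern (the value is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	var major uint64 = 1 // e.g. a semver major version parsed as uint64

	// uint64 -> int narrows the value, so gosec reports G115 here.
	// The version number is known to be tiny, so the finding is
	// suppressed in place instead of adding a bounds check:
	//#nosec G115
	s := strconv.Itoa(int(major))

	fmt.Println("major version:", s)
}
```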
3 changes: 1 addition & 2 deletions cmd/clusterawsadm/cloudformation/bootstrap/template_test.go
@@ -18,7 +18,6 @@ package bootstrap

import (
"bytes"
"fmt"
"os"
"path"
"testing"
@@ -206,7 +205,7 @@ func TestRenderCloudformation(t *testing.T) {
dmp := diffmatchpatch.New()
diffs := dmp.DiffMain(string(tData), string(data), false)
out := dmp.DiffPrettyText(diffs)
-t.Fatalf(fmt.Sprintf("Differing output (%s):\n%s", c.fixture, out))
+t.Fatalf("Differing output (%s):\n%s", c.fixture, out)
}
})
}
8 changes: 4 additions & 4 deletions controllers/awscluster_controller.go
@@ -277,7 +277,7 @@ func (r *AWSClusterReconciler) reconcileLoadBalancer(clusterScope *scope.Cluster

if err := elbService.ReconcileLoadbalancers(); err != nil {
clusterScope.Error(err, "failed to reconcile load balancer")
-conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error())
+conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
return nil, err
}

@@ -322,12 +322,12 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope)

if err := sgService.ReconcileSecurityGroups(); err != nil {
clusterScope.Error(err, "failed to reconcile security groups")
-conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error())
+conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
return reconcile.Result{}, err
}

if err := ec2Service.ReconcileBastion(); err != nil {
-conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), err.Error())
+conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
clusterScope.Error(err, "failed to reconcile bastion host")
return reconcile.Result{}, err
}
@@ -347,7 +347,7 @@ func (r *AWSClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope)
}

if err := s3Service.ReconcileBucket(); err != nil {
-conditions.MarkFalse(awsCluster, infrav1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1.ConditionSeverityError, err.Error())
+conditions.MarkFalse(awsCluster, infrav1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
}
conditions.MarkTrue(awsCluster, infrav1.S3BucketReadyCondition)
21 changes: 10 additions & 11 deletions controllers/awsmachine_controller.go
@@ -335,7 +335,7 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope,
// all the other errors are blocking.
// Because we are reconciling all load balancers, attempt to treat the error as a list of errors.
if err = kerrors.FilterOut(err, elb.IsAccessDenied, elb.IsNotFound); err != nil {
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err)
}
}
@@ -374,7 +374,7 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope,

if err := ec2Service.TerminateInstance(instance.ID); err != nil {
machineScope.Error(err, "failed to terminate instance")
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err)
return ctrl.Result{}, err
}
@@ -402,7 +402,7 @@ func (r *AWSMachineReconciler) reconcileDelete(machineScope *scope.MachineScope,
for _, id := range machineScope.AWSMachine.Spec.NetworkInterfaces {
if err := ec2Service.DetachSecurityGroupsFromNetworkInterface(core, id); err != nil {
machineScope.Error(err, "failed to detach security groups from instance's network interfaces")
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
return ctrl.Result{}, err
}
}
@@ -494,7 +494,7 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *
instance, err := r.findInstance(machineScope, ec2svc)
if err != nil {
machineScope.Error(err, "unable to find instance")
-conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, err.Error())
+conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error())
return ctrl.Result{}, err
}

@@ -527,7 +527,7 @@ func (r *AWSMachineReconciler) reconcileNormal(_ context.Context, machineScope *
instance, err = r.createInstance(ec2svc, machineScope, clusterScope, objectStoreSvc)
if err != nil {
machineScope.Error(err, "unable to create instance")
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
return ctrl.Result{}, err
}
}
@@ -661,7 +661,7 @@ func (r *AWSMachineReconciler) reconcileOperationalState(ec2svc services.EC2Inte
// Ensure that the security groups are correct.
_, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups)
if err != nil {
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
machineScope.Error(err, "unable to ensure security groups")
return err
}
@@ -990,7 +990,7 @@ func (r *AWSMachineReconciler) registerInstanceToClassicLB(machineScope *scope.M
if err := elbsvc.RegisterInstanceWithAPIServerELB(i); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB",
"Failed to register control plane instance %q with classic load balancer: %v", i.ID, err)
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
return errors.Wrapf(err, "could not register control plane instance %q with classic load balancer", i.ID)
}
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB",
@@ -1022,7 +1022,7 @@ func (r *AWSMachineReconciler) registerInstanceToV2LB(machineScope *scope.Machin
if err := elbsvc.RegisterInstanceWithAPIServerLB(instance, lb); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB",
"Failed to register control plane instance %q with load balancer: %v", instance.ID, err)
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
return errors.Wrapf(err, "could not register control plane instance %q with load balancer", instance.ID)
}
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB",
@@ -1046,7 +1046,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromClassicLB(machineScope *sco
if err := elbsvc.DeregisterInstanceFromAPIServerELB(instance); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
"Failed to deregister control plane instance %q from load balancer: %v", instance.ID, err)
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", instance.ID)
}

@@ -1071,7 +1071,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromV2LB(machineScope *scope.Ma
if err := elbsvc.DeregisterInstanceFromAPIServerLB(targetGroupArn, i); err != nil {
r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB",
"Failed to deregister control plane instance %q from load balancer: %v", i.ID, err)
-conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, err.Error())
+conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID)
}
}
@@ -1141,7 +1141,6 @@ func (r *AWSMachineReconciler) requestsForCluster(log logger.Wrapper, namespace,

result := make([]ctrl.Request, 0, len(machineList.Items))
for _, m := range machineList.Items {
-m := m
log.WithValues("machine", klog.KObj(&m))
if m.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSMachine" {
log.Trace("Machine has an InfrastructureRef for a different type, will not add to reconciliation request.")
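The dropped `m := m` line above is what `copyloopvar` now flags: since Go 1.22, each `for ... range` iteration binds a fresh loop variable, so copying it before taking its address is redundant. A minimal sketch of why the copy used to matter (assuming a Go 1.22+ toolchain, as the linter swap implies):

```go
package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}
	ptrs := make([]*string, 0, len(items))

	for _, m := range items {
		// Before Go 1.22 this loop needed `m := m` here, or every stored
		// pointer would alias one shared loop variable. Since Go 1.22 each
		// iteration gets its own `m`, so copyloopvar reports the copy as
		// redundant and it can simply be deleted.
		ptrs = append(ptrs, &m)
	}

	for _, p := range ptrs {
		fmt.Println(*p) // prints a, b, c: each pointer kept its own value
	}
}
```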
(Diff truncated: 33 more changed files not shown.)
