ci: revisit Go linter
@@ -24,9 +24,9 @@ import (
     "k8s.io/apimachinery/pkg/runtime/serializer"
 )
 
-type PodListerFunc func(context.Context) ([]*v1.Pod, error) //nolint:golint
+type PodListerFunc func(context.Context) ([]*v1.Pod, error)
 
-func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc { //nolint:golint
+func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc {
     if getPods == nil {
         return NotImplemented
     }
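Note: the //nolint:golint escapes removed above stop doing anything once golint itself is out of the lint run (golint is deprecated upstream and commonly replaced by revive), so deleting them is the tidy fix. As a hedged illustration only, not code from this change and with illustrative comment wording, the rule those directives most often silence wants exported identifiers documented:

package api // illustrative package name

import (
	"context"

	v1 "k8s.io/api/core/v1"
)

// PodListerFunc lists the pods the provider currently reports as running.
// A doc comment like this satisfies the exported-identifier check that golint
// (and its successor, revive) applies to exported names, which is what the
// nolint directive had been papering over.
type PodListerFunc func(ctx context.Context) ([]*v1.Pod, error)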
@@ -78,15 +78,15 @@ func newLeaseControllerWithRenewInterval(
     nodeController *NodeController) (*leaseController, error) {
 
     if leaseDurationSeconds <= 0 {
-        return nil, fmt.Errorf("Lease duration seconds %d is invalid, it must be > 0", leaseDurationSeconds)
+        return nil, fmt.Errorf("lease duration seconds %d is invalid, it must be > 0", leaseDurationSeconds)
     }
 
     if renewInterval == 0 {
-        return nil, fmt.Errorf("Lease renew interval %s is invalid, it must be > 0", renewInterval.String())
+        return nil, fmt.Errorf("lease renew interval %s is invalid, it must be > 0", renewInterval.String())
     }
 
     if float64(leaseDurationSeconds) <= renewInterval.Seconds() {
-        return nil, fmt.Errorf("Lease renew interval %s is invalid, it must be less than lease duration seconds %d", renewInterval.String(), leaseDurationSeconds)
+        return nil, fmt.Errorf("lease renew interval %s is invalid, it must be less than lease duration seconds %d", renewInterval.String(), leaseDurationSeconds)
     }
 
     return &leaseController{
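The recurring capitalization fix in this commit follows the standard Go convention (golint / staticcheck ST1005): error strings start lowercase and carry no trailing punctuation, because they are usually wrapped into longer messages. A minimal, self-contained sketch of why that reads better; the values and wrapping text here are illustrative:

package main

import "fmt"

func main() {
	// The inner string is lowercase so the wrapped message reads as one sentence.
	base := fmt.Errorf("lease duration seconds %d is invalid, it must be > 0", 0)
	err := fmt.Errorf("unable to configure lease controller: %w", base)
	fmt.Println(err)
	// unable to configure lease controller: lease duration seconds 0 is invalid, it must be > 0
}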
@@ -128,7 +128,7 @@ func (c *leaseController) sync(ctx context.Context) {
         return
     }
     if node == nil {
-        err = errors.New("Servernode is null")
+        err = errors.New("server node is null")
         log.G(ctx).WithError(err).Error("servernode is null")
         span.SetStatus(err)
         return
@@ -441,7 +441,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
     // TODO(Sargun): Make this "smarter" about the status the pod is in.
     func(ev watch.Event) (bool, error) {
         pod := ev.Object.(*corev1.Pod)
-        return pod.Name == p.ObjectMeta.Name, nil
+        return pod.Name == p.Name, nil
     })
 
     sendErr(ctx, watchErrCh, watchErr)
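Most of the remaining hunks swap pod.ObjectMeta.X for pod.X. ObjectMeta is an embedded field of corev1.Pod, so its fields (Name, Namespace, UID, Labels, and so on) are promoted and the explicit selector is redundant; that is the simplification whichever embedded-field check in the revisited lint configuration keeps flagging. A hedged, standalone sketch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	pod := &corev1.Pod{}
	// ObjectMeta is embedded in Pod, so both selectors name the same field.
	pod.ObjectMeta.Name = "nginx"
	fmt.Println(pod.Name) // nginx
}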
@@ -628,7 +628,7 @@ func benchmarkCreatePods(ctx context.Context, b *testing.B, s *system) {
 type podModifier func(*corev1.Pod)
 
 func randomizeUID(pod *corev1.Pod) {
-    pod.ObjectMeta.UID = uuid.NewUUID()
+    pod.UID = uuid.NewUUID()
 }
 
 func randomizeName(pod *corev1.Pod) {
@@ -638,7 +638,7 @@ func randomizeName(pod *corev1.Pod) {
 
 func forRealAPIServer(pod *corev1.Pod) {
     pod.ResourceVersion = ""
-    pod.ObjectMeta.UID = ""
+    pod.UID = ""
 }
 
 func nameBasedOnTest(t *testing.T) podModifier {
@@ -243,15 +243,15 @@ func buildKeyFromNames(namespace string, name string) (string, error) {
 
 // buildKey is a helper for building the "key" for the providers pod store.
 func buildKey(pod *v1.Pod) (string, error) {
-    if pod.ObjectMeta.Namespace == "" {
+    if pod.Namespace == "" {
         return "", fmt.Errorf("pod namespace not found")
     }
 
-    if pod.ObjectMeta.Name == "" {
+    if pod.Name == "" {
         return "", fmt.Errorf("pod name not found")
     }
 
-    return buildKeyFromNames(pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
+    return buildKeyFromNames(pod.Namespace, pod.Name)
 }
 
 type mockProviderAsync struct {
node/node.go
@@ -146,7 +146,7 @@ func WithNodeEnableLeaseV1WithRenewInterval(client coordclientset.LeaseInterface
             n,
         )
         if err != nil {
-            return fmt.Errorf("Unable to configure lease controller: %w", err)
+            return fmt.Errorf("unable to configure lease controller: %w", err)
         }
 
         n.leaseController = leaseController
@@ -327,9 +327,9 @@ func (n *NodeController) ensureNode(ctx context.Context, providerNode *corev1.No
     n.serverNodeLock.Unlock()
     // Bad things will happen if the node is deleted in k8s and recreated by someone else
     // we rely on this persisting
-    providerNode.ObjectMeta.Name = node.Name
-    providerNode.ObjectMeta.Namespace = node.Namespace
-    providerNode.ObjectMeta.UID = node.UID
+    providerNode.Name = node.Name
+    providerNode.Namespace = node.Namespace
+    providerNode.UID = node.UID
 
     return nil
 }
@@ -346,7 +346,9 @@ func (n *NodeController) controlLoop(ctx context.Context, providerNode *corev1.N
 
     var sleepInterval time.Duration
     if n.leaseController == nil {
-        log.G(ctx).WithField("pingInterval", n.pingInterval).Debug("lease controller is not enabled, updating node status in Kube API server at Ping Time Interval")
+        log.G(ctx).
+            WithField("pingInterval", n.pingInterval).
+            Debug("lease controller is not enabled, updating node status in Kube API server at Ping Time Interval")
         sleepInterval = n.pingInterval
     } else {
         log.G(ctx).WithField("statusInterval", n.statusInterval).Debug("lease controller in use, updating at statusInterval")
@@ -369,8 +371,8 @@ func (n *NodeController) controlLoop(ctx context.Context, providerNode *corev1.N
             log.G(ctx).Debug("Received node status update")
 
             providerNode.Status = updated.Status
-            providerNode.ObjectMeta.Annotations = updated.Annotations
-            providerNode.ObjectMeta.Labels = updated.Labels
+            providerNode.Annotations = updated.Annotations
+            providerNode.Labels = updated.Labels
             if err := n.updateStatus(ctx, providerNode, false); err != nil {
                 log.G(ctx).WithError(err).Error("Error handling node status update")
             }
@@ -401,7 +403,7 @@ func (n *NodeController) updateStatus(ctx context.Context, providerNode *corev1.
     if result, err := n.nodePingController.getResult(ctx); err != nil {
         return err
     } else if result.error != nil {
-        return fmt.Errorf("Not updating node status because node ping failed: %w", result.error)
+        return fmt.Errorf("not updating node status because node ping failed: %w", result.error)
     }
 
     updateNodeStatusHeartbeat(providerNode)
@@ -429,7 +431,7 @@ func (n *NodeController) updateStatus(ctx context.Context, providerNode *corev1.
 }
 
 // Returns a copy of the server node object
-func (n *NodeController) getServerNode(ctx context.Context) (*corev1.Node, error) {
+func (n *NodeController) getServerNode(_ context.Context) (*corev1.Node, error) {
     n.serverNodeLock.Lock()
     defer n.serverNodeLock.Unlock()
     if n.serverNode == nil {
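Renaming the unused ctx parameter to _ keeps getServerNode's signature stable for callers while telling both the reader and the unused-parameter check (revive/unparam style; the exact linter is an assumption here) that the context is intentionally ignored. An illustrative, self-contained sketch with made-up names:

package main

import (
	"context"
	"fmt"
)

// The context stays in the signature so call sites and interfaces do not change,
// but the blank identifier marks it as deliberately unused.
func currentCount(_ context.Context) int {
	return 42
}

func main() {
	fmt.Println(currentCount(context.Background()))
}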
node/pod.go
@@ -126,8 +126,8 @@ func podsEqual(pod1, pod2 *corev1.Pod) bool {
         cmp.Equal(pod1.Spec.InitContainers, pod2.Spec.InitContainers) &&
         cmp.Equal(pod1.Spec.ActiveDeadlineSeconds, pod2.Spec.ActiveDeadlineSeconds) &&
         cmp.Equal(pod1.Spec.Tolerations, pod2.Spec.Tolerations) &&
-        cmp.Equal(pod1.ObjectMeta.Labels, pod2.Labels) &&
-        cmp.Equal(pod1.ObjectMeta.Annotations, pod2.Annotations)
+        cmp.Equal(pod1.Labels, pod2.Labels) &&
+        cmp.Equal(pod1.Annotations, pod2.Annotations)
 
 }
@@ -310,10 +310,10 @@ func (pc *PodController) enqueuePodStatusUpdate(ctx context.Context, pod *corev1
 
     if err != nil {
         if errors.IsNotFound(err) {
-            err = fmt.Errorf("Pod %q not found in pod lister: %w", key, err)
-            log.G(ctx).WithError(err).Debug("Not enqueuing pod status update")
+            err = fmt.Errorf("pod %q not found in pod lister: %w", key, err)
+            log.G(ctx).WithError(err).Debug("not enqueuing pod status update")
         } else {
-            log.G(ctx).WithError(err).Warn("Not enqueuing pod status update due to error from pod lister")
+            log.G(ctx).WithError(err).Warn("not enqueuing pod status update due to error from pod lister")
         }
         span.SetStatus(err)
         return
@@ -212,8 +212,8 @@ func TestPodCreateNewPod(t *testing.T) {
     svr := newTestController()
 
     pod := &corev1.Pod{}
-    pod.ObjectMeta.Namespace = "default" //nolint:goconst
-    pod.ObjectMeta.Name = "nginx" //nolint:goconst
+    pod.Namespace = "default" //nolint:goconst
+    pod.Name = "nginx" //nolint:goconst
     pod.Spec = newPodSpec()
 
     err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
@@ -229,8 +229,8 @@ func TestPodCreateNewPodWithNoDownwardAPIResolution(t *testing.T) {
     svr.skipDownwardAPIResolution = true
 
     pod := &corev1.Pod{}
-    pod.ObjectMeta.Namespace = "default" //nolint:goconst
-    pod.ObjectMeta.Name = "nginx" //nolint:goconst
+    pod.Namespace = "default" //nolint:goconst
+    pod.Name = "nginx" //nolint:goconst
     pod.Spec = newPodSpec()
     pod.Spec.Containers[0].Env = []corev1.EnvVar{
         {
@@ -264,8 +264,8 @@ func TestPodUpdateExisting(t *testing.T) {
     svr := newTestController()
 
     pod := &corev1.Pod{}
-    pod.ObjectMeta.Namespace = "default"
-    pod.ObjectMeta.Name = "nginx"
+    pod.Namespace = "default"
+    pod.Name = "nginx"
     pod.Spec = newPodSpec()
 
     err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
@@ -288,8 +288,8 @@ func TestPodNoSpecChange(t *testing.T) {
     svr := newTestController()
 
     pod := &corev1.Pod{}
-    pod.ObjectMeta.Namespace = "default"
-    pod.ObjectMeta.Name = "nginx"
+    pod.Namespace = "default"
+    pod.Name = "nginx"
    pod.Spec = newPodSpec()
 
     err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
@@ -309,8 +309,8 @@ func TestPodStatusDelete(t *testing.T) {
     ctx := context.Background()
     c := newTestController()
     pod := &corev1.Pod{}
-    pod.ObjectMeta.Namespace = "default"
-    pod.ObjectMeta.Name = "nginx"
+    pod.Namespace = "default"
+    pod.Name = "nginx"
     pod.Spec = newPodSpec()
     fk8s := fake.NewSimpleClientset(pod)
     c.client = fk8s
@@ -375,8 +375,8 @@ func TestReCreatePodRace(t *testing.T) {
     ctx := context.Background()
     c := newTestController()
     pod := &corev1.Pod{}
-    pod.ObjectMeta.Namespace = "default"
-    pod.ObjectMeta.Name = "nginx"
+    pod.Namespace = "default"
+    pod.Name = "nginx"
     pod.Spec = newPodSpec()
     pod.UID = "aaaaa"
     podCopy := pod.DeepCopy()
@@ -123,8 +123,8 @@ func TestPodEventFilter(t *testing.T) {
     }
 
     pod := &corev1.Pod{}
-    pod.ObjectMeta.Namespace = "default"
-    pod.ObjectMeta.Name = "nginx"
+    pod.Namespace = "default"
+    pod.Name = "nginx"
     pod.Spec = newPodSpec()
 
     podC := tc.client.CoreV1().Pods(testNamespace)
@@ -158,7 +158,7 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubern
|
||||
ctx = addPodAttributes(ctx, span, podFromKubernetes)
|
||||
|
||||
var statusErr error
|
||||
podStatus, err := p.PodLifecycleHandler.GetPodStatus(ctx, podFromKubernetes.Namespace, podFromKubernetes.Name)
|
||||
podStatus, err := p.GetPodStatus(ctx, podFromKubernetes.Namespace, podFromKubernetes.Name)
|
||||
if err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
span.SetStatus(err)
|
||||
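Same promotion idea as the ObjectMeta cleanup, but for methods: the shorter p.GetPodStatus call only compiles because syncProviderWrapper evidently embeds PodLifecycleHandler, so the handler's methods are promoted onto the wrapper and the intermediate selector is noise. A hedged sketch with illustrative types:

package main

import "fmt"

type greeter struct{}

func (greeter) Hello() string { return "hello" }

// wrapper embeds greeter, so greeter's methods are promoted onto wrapper.
type wrapper struct {
	greeter
}

func main() {
	w := wrapper{}
	// Both calls resolve to the same method; the short form is what the cleanup prefers.
	fmt.Println(w.greeter.Hello(), w.Hello())
}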
@@ -184,7 +184,7 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubern
         return nil
     }
 
-    if podFromKubernetes.Status.Phase != corev1.PodRunning && time.Since(podFromKubernetes.ObjectMeta.CreationTimestamp.Time) <= time.Minute {
+    if podFromKubernetes.Status.Phase != corev1.PodRunning && time.Since(podFromKubernetes.CreationTimestamp.Time) <= time.Minute {
         span.SetStatus(statusErr)
         return statusErr
     }