ci: revisit Go linter

Author: Pires
Date: 2025-05-29 17:52:44 +01:00
Committed by: Pires
Parent: ca78265381
Commit: dd685be418

17 changed files with 144 additions and 93 deletions


@@ -28,9 +28,9 @@ jobs:
          go-version: ${{ env.GO_VERSION }}
          cache: false
      - uses: actions/checkout@v4
-     - uses: golangci/golangci-lint-action@v6
+     - uses: golangci/golangci-lint-action@v8
        with:
-         version: v1.63.4
+         version: v2.1
          args: --timeout=15m --config=.golangci.yml
          skip-cache: true
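
Note: golangci-lint-action v8 runs the golangci-lint v2 line, whose configuration format is not backwards compatible with v1, so the version input moves from a pinned v1 release (v1.63.4) to the v2.1 minor line. The v2 CLI also ships a migrate subcommand (golangci-lint migrate) that automates most of the v1-to-v2 config conversion shown in the .golangci.yml diff below.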

.golangci.bck.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
issues:
  exclude-use-default: false
  exclude:
    # EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok
    - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). (is not checked|Errors unhandled)
  exclude-dirs:
    # This directory contains code copied from upstream kubernetes/kubernetes, skip it.
    - internal/kubernetes
    # This is mostly copied from upstream; rather than fixing that code here, just ignore the errors.
    - internal/podutils
linters:
  enable:
    - errcheck
    - staticcheck
    - unconvert
    - gofmt
    - goimports
    - ineffassign
    - govet
    - unused
    - misspell
    - gosec
    - copyloopvar # Checks for pointers to enclosing loop variables
    - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
    - lll
linters-settings:
  gosec:
    excludes:
      - G304 # Potential file inclusion via variable
  lll:
    line-length: 200
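
The copyloopvar entry relates to a Go 1.22 semantics change. A minimal sketch (not from this repo) of the pattern involved: before Go 1.22, closing over or taking the address of a loop variable reused a single variable across iterations, so code made defensive per-iteration copies; since Go 1.22 each iteration gets a fresh variable, and copyloopvar flags the now-redundant copy.

package main

import "fmt"

func main() {
	nums := []int{1, 2, 3}
	var ptrs []*int
	for _, n := range nums {
		n := n // pre-Go-1.22 defensive copy; copyloopvar reports it as redundant on Go >= 1.22
		ptrs = append(ptrs, &n)
	}
	for _, p := range ptrs {
		fmt.Println(*p) // prints 1 2 3 (without the copy, pre-1.22 Go printed 3 3 3)
	}
}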


@@ -1,36 +1,51 @@
-timeout: 10m
-issues:
-  exclude-use-default: false
-  exclude:
-    # EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok
-    - Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). (is not checked|Errors unhandled)
-  exclude-dirs:
-    # This directory contains code copied from upstream kubernetes/kubernetes, skip it.
-    - internal/kubernetes
-    # This is mostly copied from upstream; rather than fixing that code here, just ignore the errors.
-    - internal/podutils
+version: "2"
 linters:
   enable:
     - errcheck
     - staticcheck
+    - copyloopvar
+    - gosec
+    - lll
+    - misspell
     - unconvert
+  settings:
+    gosec:
+      excludes:
+        - G304
+    lll:
+      line-length: 200
+  exclusions:
+    generated: lax
+    rules:
+      - path: (.+)\.go$
+        text: Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). (is not checked|Errors unhandled)
+    paths:
+      - internal/kubernetes
+      - internal/podutils
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
     - gofmt
     - goimports
-    - ineffassign
-    - govet
-    - unused
-    - misspell
-    - gosec
-    - copyloopvar # Checks for pointers to enclosing loop variables
-    - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
-    - lll
+  exclusions:
+    generated: lax
+    paths:
+      - internal/kubernetes
+      - internal/podutils
+      - third_party$
+      - builtin$
+      - examples$
+issues:
+  # Maximum issues count per one linter.
+  # Set to 0 to disable.
+  # Default: 50
+  max-issues-per-linter: 0
-linters-settings:
-  gosec:
-    excludes:
-      - G304 # Potential file inclusion via variable
-  lll:
-    line-length: 200
+  # Maximum count of issues with the same text.
+  # Set to 0 to disable.
+  # Default: 3
+  max-same-issues: 0
+  # Make issues output unique by line.
+  # Default: true
+  uniq-by-line: false
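
Note: in the v2 schema, formatters (gofmt, goimports) are configured in their own top-level section rather than as linters, per-linter options move from linters-settings to linters.settings, and the old issues.exclude regexes become linters.exclusions.rules. tenv is dropped from the enabled set because upstream deprecated it (its os.Setenv-in-tests check was folded into usetesting), and govet, ineffassign and unused are enabled by default in v2, which is presumably why they no longer need to be listed.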


@@ -43,7 +43,7 @@ var (
*/
// MockProvider implements the virtual-kubelet provider interface and stores pods in memory.
-type MockProvider struct { //nolint:golint
+type MockProvider struct {
nodeName string
operatingSystem string
internalIP string
@@ -55,7 +55,7 @@ type MockProvider struct { //nolint:golint
}
// MockConfig contains a mock virtual-kubelet's configurable parameters.
-type MockConfig struct { //nolint:golint
+type MockConfig struct {
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
Pods string `json:"pods,omitempty"`
@@ -123,17 +123,17 @@ func loadConfig(providerConfig, nodeName string) (config MockConfig, err error)
}
if _, err = resource.ParseQuantity(config.CPU); err != nil {
return config, fmt.Errorf("Invalid CPU value %v", config.CPU)
return config, fmt.Errorf("invalid CPU value %v", config.CPU)
}
if _, err = resource.ParseQuantity(config.Memory); err != nil {
return config, fmt.Errorf("Invalid memory value %v", config.Memory)
return config, fmt.Errorf("invalid memory value %v", config.Memory)
}
if _, err = resource.ParseQuantity(config.Pods); err != nil {
return config, fmt.Errorf("Invalid pods value %v", config.Pods)
return config, fmt.Errorf("invalid pods value %v", config.Pods)
}
for _, v := range config.Others {
if _, err = resource.ParseQuantity(v); err != nil {
return config, fmt.Errorf("Invalid other value %v", v)
return config, fmt.Errorf("invalid other value %v", v)
}
}
return config, nil
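
These casing changes follow staticcheck's ST1005 rule (error strings should not be capitalized), since error strings are routinely wrapped mid-sentence. A minimal sketch, assuming nothing beyond the standard library:

package main

import "fmt"

func parseCPU(cpu string) error {
	if cpu == "" {
		return fmt.Errorf("invalid CPU value %q", cpu) // lowercase: lint-clean under ST1005
	}
	return nil
}

func main() {
	if err := parseCPU(""); err != nil {
		// the wrapped error reads as one sentence: "loading config: invalid CPU value ..."
		fmt.Println("loading config:", err)
	}
}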
@@ -349,7 +349,7 @@ func (p *MockProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
return pods, nil
}
-func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { //nolint:golint
+func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) {
ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") //nolint:staticcheck,ineffassign
defer span.End()
@@ -367,8 +367,8 @@ func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { //nolint
}
n.Status.NodeInfo.OperatingSystem = os
n.Status.NodeInfo.Architecture = "amd64"
n.ObjectMeta.Labels["alpha.service-controller.kubernetes.io/exclude-balancer"] = "true"
n.ObjectMeta.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
n.Labels["alpha.service-controller.kubernetes.io/exclude-balancer"] = "true"
n.Labels["node.kubernetes.io/exclude-from-external-load-balancers"] = "true"
}
// Capacity returns a resource list containing the capacity limits.
@@ -671,15 +671,15 @@ func buildKeyFromNames(namespace string, name string) (string, error) {
// buildKey is a helper for building the "key" for the providers pod store.
func buildKey(pod *v1.Pod) (string, error) {
-if pod.ObjectMeta.Namespace == "" {
+if pod.Namespace == "" {
return "", fmt.Errorf("pod namespace not found")
}
-if pod.ObjectMeta.Name == "" {
+if pod.Name == "" {
return "", fmt.Errorf("pod name not found")
}
-return buildKeyFromNames(pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
+return buildKeyFromNames(pod.Namespace, pod.Name)
}
// addAttributes adds the specified attributes to the provided span.
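
The pod.ObjectMeta.X -> pod.X rewrites throughout this commit are behavior-neutral: metav1.ObjectMeta is embedded in the Pod type, so its fields are promoted. A self-contained sketch of the mechanism, using hypothetical stand-in types rather than the k8s ones:

package main

import "fmt"

type ObjectMeta struct {
	Name      string
	Namespace string
}

type Pod struct {
	ObjectMeta // embedded struct: its fields are promoted onto Pod
}

func main() {
	p := Pod{ObjectMeta{Name: "nginx", Namespace: "default"}}
	// both selectors resolve to the same field
	fmt.Println(p.Name == p.ObjectMeta.Name) // true
}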


@@ -13,7 +13,7 @@ type Store struct {
ls map[string]InitFunc
}
-func NewStore() *Store { //nolint:golint
+func NewStore() *Store {
return &Store{
ls: make(map[string]InitFunc),
}
@@ -71,4 +71,4 @@ type InitConfig struct {
ResourceManager *manager.ResourceManager
}
-type InitFunc func(InitConfig) (Provider, error) //nolint:golint
+type InitFunc func(InitConfig) (Provider, error)


@@ -7,7 +7,7 @@ const (
OperatingSystemWindows = "windows"
)
-type OperatingSystems map[string]bool //nolint:golint
+type OperatingSystems map[string]bool
var (
// ValidOperatingSystems defines the group of operating systems
@@ -18,7 +18,7 @@ var (
}
)
-func (o OperatingSystems) Names() []string { //nolint:golint
+func (o OperatingSystems) Names() []string {
keys := make([]string, 0, len(o))
for k := range o {
keys = append(keys, k)


@@ -118,7 +118,7 @@ func IsPodReady(pod *corev1.Pod) bool {
func (f *Framework) WaitUntilPodDeleted(namespace, name string) (*corev1.Pod, error) {
return f.WaitUntilPodCondition(namespace, name, func(event watchapi.Event) (bool, error) {
pod := event.Object.(*corev1.Pod)
-return event.Type == watchapi.Deleted || pod.ObjectMeta.DeletionTimestamp != nil, nil
+return event.Type == watchapi.Deleted || pod.DeletionTimestamp != nil, nil
})
}


@@ -24,9 +24,9 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
)
-type PodListerFunc func(context.Context) ([]*v1.Pod, error) //nolint:golint
+type PodListerFunc func(context.Context) ([]*v1.Pod, error)
-func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc { //nolint:golint
+func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc {
if getPods == nil {
return NotImplemented
}


@@ -78,15 +78,15 @@ func newLeaseControllerWithRenewInterval(
nodeController *NodeController) (*leaseController, error) {
if leaseDurationSeconds <= 0 {
return nil, fmt.Errorf("Lease duration seconds %d is invalid, it must be > 0", leaseDurationSeconds)
return nil, fmt.Errorf("lease duration seconds %d is invalid, it must be > 0", leaseDurationSeconds)
}
if renewInterval == 0 {
return nil, fmt.Errorf("Lease renew interval %s is invalid, it must be > 0", renewInterval.String())
return nil, fmt.Errorf("lease renew interval %s is invalid, it must be > 0", renewInterval.String())
}
if float64(leaseDurationSeconds) <= renewInterval.Seconds() {
return nil, fmt.Errorf("Lease renew interval %s is invalid, it must be less than lease duration seconds %d", renewInterval.String(), leaseDurationSeconds)
return nil, fmt.Errorf("lease renew interval %s is invalid, it must be less than lease duration seconds %d", renewInterval.String(), leaseDurationSeconds)
}
return &leaseController{
@@ -128,7 +128,7 @@ func (c *leaseController) sync(ctx context.Context) {
return
}
if node == nil {
err = errors.New("Servernode is null")
err = errors.New("server node is null")
log.G(ctx).WithError(err).Error("servernode is null")
span.SetStatus(err)
return


@@ -441,7 +441,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
// TODO(Sargun): Make this "smarter" about the status the pod is in.
func(ev watch.Event) (bool, error) {
pod := ev.Object.(*corev1.Pod)
-return pod.Name == p.ObjectMeta.Name, nil
+return pod.Name == p.Name, nil
})
sendErr(ctx, watchErrCh, watchErr)
@@ -628,7 +628,7 @@ func benchmarkCreatePods(ctx context.Context, b *testing.B, s *system) {
type podModifier func(*corev1.Pod)
func randomizeUID(pod *corev1.Pod) {
-pod.ObjectMeta.UID = uuid.NewUUID()
+pod.UID = uuid.NewUUID()
}
func randomizeName(pod *corev1.Pod) {
@@ -638,7 +638,7 @@ func randomizeName(pod *corev1.Pod) {
func forRealAPIServer(pod *corev1.Pod) {
pod.ResourceVersion = ""
pod.ObjectMeta.UID = ""
pod.UID = ""
}
func nameBasedOnTest(t *testing.T) podModifier {


@@ -243,15 +243,15 @@ func buildKeyFromNames(namespace string, name string) (string, error) {
// buildKey is a helper for building the "key" for the providers pod store.
func buildKey(pod *v1.Pod) (string, error) {
-if pod.ObjectMeta.Namespace == "" {
+if pod.Namespace == "" {
return "", fmt.Errorf("pod namespace not found")
}
-if pod.ObjectMeta.Name == "" {
+if pod.Name == "" {
return "", fmt.Errorf("pod name not found")
}
-return buildKeyFromNames(pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
+return buildKeyFromNames(pod.Namespace, pod.Name)
}
type mockProviderAsync struct {


@@ -146,7 +146,7 @@ func WithNodeEnableLeaseV1WithRenewInterval(client coordclientset.LeaseInterface
n,
)
if err != nil {
return fmt.Errorf("Unable to configure lease controller: %w", err)
return fmt.Errorf("unable to configure lease controller: %w", err)
}
n.leaseController = leaseController
@@ -327,9 +327,9 @@ func (n *NodeController) ensureNode(ctx context.Context, providerNode *corev1.No
n.serverNodeLock.Unlock()
// Bad things will happen if the node is deleted in k8s and recreated by someone else
// we rely on this persisting
-providerNode.ObjectMeta.Name = node.Name
-providerNode.ObjectMeta.Namespace = node.Namespace
-providerNode.ObjectMeta.UID = node.UID
+providerNode.Name = node.Name
+providerNode.Namespace = node.Namespace
+providerNode.UID = node.UID
return nil
}
@@ -346,7 +346,9 @@ func (n *NodeController) controlLoop(ctx context.Context, providerNode *corev1.N
var sleepInterval time.Duration
if n.leaseController == nil {
log.G(ctx).WithField("pingInterval", n.pingInterval).Debug("lease controller is not enabled, updating node status in Kube API server at Ping Time Interval")
log.G(ctx).
WithField("pingInterval", n.pingInterval).
Debug("lease controller is not enabled, updating node status in Kube API server at Ping Time Interval")
sleepInterval = n.pingInterval
} else {
log.G(ctx).WithField("statusInterval", n.statusInterval).Debug("lease controller in use, updating at statusInterval")
@@ -369,8 +371,8 @@ func (n *NodeController) controlLoop(ctx context.Context, providerNode *corev1.N
log.G(ctx).Debug("Received node status update")
providerNode.Status = updated.Status
-providerNode.ObjectMeta.Annotations = updated.Annotations
-providerNode.ObjectMeta.Labels = updated.Labels
+providerNode.Annotations = updated.Annotations
+providerNode.Labels = updated.Labels
if err := n.updateStatus(ctx, providerNode, false); err != nil {
log.G(ctx).WithError(err).Error("Error handling node status update")
}
@@ -401,7 +403,7 @@ func (n *NodeController) updateStatus(ctx context.Context, providerNode *corev1.
if result, err := n.nodePingController.getResult(ctx); err != nil {
return err
} else if result.error != nil {
return fmt.Errorf("Not updating node status because node ping failed: %w", result.error)
return fmt.Errorf("not updating node status because node ping failed: %w", result.error)
}
updateNodeStatusHeartbeat(providerNode)
@@ -429,7 +431,7 @@ func (n *NodeController) updateStatus(ctx context.Context, providerNode *corev1.
}
// Returns a copy of the server node object
-func (n *NodeController) getServerNode(ctx context.Context) (*corev1.Node, error) {
+func (n *NodeController) getServerNode(_ context.Context) (*corev1.Node, error) {
n.serverNodeLock.Lock()
defer n.serverNodeLock.Unlock()
if n.serverNode == nil {
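
Renaming the unused ctx parameter to _ documents that it is intentionally ignored while keeping the signature intact for callers, and it quiets unused-parameter diagnostics. A hedged sketch of the pattern, with made-up types:

package main

import (
	"context"
	"fmt"
)

type controller struct{ node string }

// The context is part of the conventional signature but unused here;
// naming it _ makes that explicit.
func (c *controller) getServerNode(_ context.Context) string {
	return c.node
}

func main() {
	c := &controller{node: "vk-node"}
	fmt.Println(c.getServerNode(context.Background()))
}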


@@ -126,8 +126,8 @@ func podsEqual(pod1, pod2 *corev1.Pod) bool {
cmp.Equal(pod1.Spec.InitContainers, pod2.Spec.InitContainers) &&
cmp.Equal(pod1.Spec.ActiveDeadlineSeconds, pod2.Spec.ActiveDeadlineSeconds) &&
cmp.Equal(pod1.Spec.Tolerations, pod2.Spec.Tolerations) &&
-cmp.Equal(pod1.ObjectMeta.Labels, pod2.Labels) &&
-cmp.Equal(pod1.ObjectMeta.Annotations, pod2.Annotations)
+cmp.Equal(pod1.Labels, pod2.Labels) &&
+cmp.Equal(pod1.Annotations, pod2.Annotations)
}
@@ -310,10 +310,10 @@ func (pc *PodController) enqueuePodStatusUpdate(ctx context.Context, pod *corev1
if err != nil {
if errors.IsNotFound(err) {
err = fmt.Errorf("Pod %q not found in pod lister: %w", key, err)
log.G(ctx).WithError(err).Debug("Not enqueuing pod status update")
err = fmt.Errorf("pod %q not found in pod lister: %w", key, err)
log.G(ctx).WithError(err).Debug("not enqueuing pod status update")
} else {
log.G(ctx).WithError(err).Warn("Not enqueuing pod status update due to error from pod lister")
log.G(ctx).WithError(err).Warn("not enqueuing pod status update due to error from pod lister")
}
span.SetStatus(err)
return


@@ -212,8 +212,8 @@ func TestPodCreateNewPod(t *testing.T) {
svr := newTestController()
pod := &corev1.Pod{}
pod.ObjectMeta.Namespace = "default" //nolint:goconst
pod.ObjectMeta.Name = "nginx" //nolint:goconst
pod.Namespace = "default" //nolint:goconst
pod.Name = "nginx" //nolint:goconst
pod.Spec = newPodSpec()
err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
@@ -229,8 +229,8 @@ func TestPodCreateNewPodWithNoDownwardAPIResolution(t *testing.T) {
svr.skipDownwardAPIResolution = true
pod := &corev1.Pod{}
pod.ObjectMeta.Namespace = "default" //nolint:goconst
pod.ObjectMeta.Name = "nginx" //nolint:goconst
pod.Namespace = "default" //nolint:goconst
pod.Name = "nginx" //nolint:goconst
pod.Spec = newPodSpec()
pod.Spec.Containers[0].Env = []corev1.EnvVar{
{
@@ -264,8 +264,8 @@ func TestPodUpdateExisting(t *testing.T) {
svr := newTestController()
pod := &corev1.Pod{}
pod.ObjectMeta.Namespace = "default"
pod.ObjectMeta.Name = "nginx"
pod.Namespace = "default"
pod.Name = "nginx"
pod.Spec = newPodSpec()
err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
@@ -288,8 +288,8 @@ func TestPodNoSpecChange(t *testing.T) {
svr := newTestController()
pod := &corev1.Pod{}
pod.ObjectMeta.Namespace = "default"
pod.ObjectMeta.Name = "nginx"
pod.Namespace = "default"
pod.Name = "nginx"
pod.Spec = newPodSpec()
err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
@@ -309,8 +309,8 @@ func TestPodStatusDelete(t *testing.T) {
ctx := context.Background()
c := newTestController()
pod := &corev1.Pod{}
pod.ObjectMeta.Namespace = "default"
pod.ObjectMeta.Name = "nginx"
pod.Namespace = "default"
pod.Name = "nginx"
pod.Spec = newPodSpec()
fk8s := fake.NewSimpleClientset(pod)
c.client = fk8s
@@ -375,8 +375,8 @@ func TestReCreatePodRace(t *testing.T) {
ctx := context.Background()
c := newTestController()
pod := &corev1.Pod{}
pod.ObjectMeta.Namespace = "default"
pod.ObjectMeta.Name = "nginx"
pod.Namespace = "default"
pod.Name = "nginx"
pod.Spec = newPodSpec()
pod.UID = "aaaaa"
podCopy := pod.DeepCopy()


@@ -123,8 +123,8 @@ func TestPodEventFilter(t *testing.T) {
}
pod := &corev1.Pod{}
pod.ObjectMeta.Namespace = "default"
pod.ObjectMeta.Name = "nginx"
pod.Namespace = "default"
pod.Name = "nginx"
pod.Spec = newPodSpec()
podC := tc.client.CoreV1().Pods(testNamespace)


@@ -158,7 +158,7 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubern
ctx = addPodAttributes(ctx, span, podFromKubernetes)
var statusErr error
-podStatus, err := p.PodLifecycleHandler.GetPodStatus(ctx, podFromKubernetes.Namespace, podFromKubernetes.Name)
+podStatus, err := p.GetPodStatus(ctx, podFromKubernetes.Namespace, podFromKubernetes.Name)
if err != nil {
if !errdefs.IsNotFound(err) {
span.SetStatus(err)
@@ -184,7 +184,7 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubern
return nil
}
-if podFromKubernetes.Status.Phase != corev1.PodRunning && time.Since(podFromKubernetes.ObjectMeta.CreationTimestamp.Time) <= time.Minute {
+if podFromKubernetes.Status.Phase != corev1.PodRunning && time.Since(podFromKubernetes.CreationTimestamp.Time) <= time.Minute {
span.SetStatus(statusErr)
return statusErr
}
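
Methods promote the same way fields do: because syncProviderWrapper embeds PodLifecycleHandler, p.GetPodStatus(...) already dispatches to p.PodLifecycleHandler.GetPodStatus(...). A minimal sketch with stand-in types (not the virtual-kubelet ones):

package main

import "fmt"

type PodLifecycleHandler interface {
	GetPodStatus(namespace, name string) (string, error)
}

type fakeHandler struct{}

func (fakeHandler) GetPodStatus(namespace, name string) (string, error) {
	return "Running", nil
}

type syncProviderWrapper struct {
	PodLifecycleHandler // embedded interface: its methods are promoted
}

func main() {
	p := syncProviderWrapper{PodLifecycleHandler: fakeHandler{}}
	status, _ := p.GetPodStatus("default", "nginx") // same as p.PodLifecycleHandler.GetPodStatus(...)
	fmt.Println(status)
}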


@@ -251,8 +251,8 @@ func (ts *EndToEndTestSuite) TestPodLifecycleGracefulDelete(t *testing.T) {
// Make sure we saw the delete event, and the delete event was graceful
assert.Assert(t, podLast != nil)
-assert.Assert(t, podLast.ObjectMeta.GetDeletionGracePeriodSeconds() != nil)
-assert.Assert(t, *podLast.ObjectMeta.GetDeletionGracePeriodSeconds() > 0)
+assert.Assert(t, podLast.GetDeletionGracePeriodSeconds() != nil)
+assert.Assert(t, *podLast.GetDeletionGracePeriodSeconds() > 0)
}
// TestPodLifecycleForceDelete creates one pod and verifies that the provider has created it