Merge pull request #755 from sargun/fix-golang-lint

Fix golang lint
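Bumps golangci-lint from v1.15.0 to v1.17.1 and switches CI to lint the whole tree instead of only commits since master, then fixes or suppresses the warnings that surface: unchecked error returns (errcheck), name conventions (golint), ineffectual assignments (ineffassign), and repeated string literals (goconst). The pod controller tests also consolidate their repeated pod fixtures into a shared newPodSpec() helper.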
Brian Goff
2019-09-03 11:25:21 -07:00
committed by GitHub
12 changed files with 57 additions and 155 deletions

View File

@@ -15,10 +15,10 @@ jobs:
          command: V=1 CI=1 make vet
      - run:
          name: Install linters
-          command: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s v1.15.0 && mv ./bin/* /go/bin/
+          command: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s v1.17.1 && mv ./bin/* /go/bin/
      - run:
          name: Lint
-          command: golangci-lint run --new-from-rev "HEAD~$(git rev-list master.. --count)" ./...
+          command: golangci-lint run ./...
      - run:
          name: Dependencies
          command: scripts/validate/gomod.sh

View File

@@ -69,7 +69,7 @@ func installFlags(flags *pflag.FlagSet, c *Opts) {
 	flags.StringVar(&c.TaintKey, "taint", c.TaintKey, "Set node taint key")
 	flags.BoolVar(&c.DisableTaint, "disable-taint", c.DisableTaint, "disable the virtual-kubelet node taint")
-	flags.MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable")
+	flags.MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable") //nolint:errcheck
 	flags.IntVar(&c.PodSyncWorkers, "pod-sync-workers", c.PodSyncWorkers, `set the number of pod synchronization workers`)
 	flags.BoolVar(&c.EnableNodeLease, "enable-node-lease", c.EnableNodeLease, `use node leases (1.13) for node heartbeats`)
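The //nolint:errcheck annotations in this and later hunks address errcheck, which reports any dropped error return. A minimal standalone sketch of the pattern (illustrative, not code from this repository):

	package main

	import (
		"fmt"
		"log"
		"os"
	)

	func main() {
		// errcheck flags this call because Fprintln returns (int, error) and
		// both results are discarded; the nolint comment marks the drop as
		// deliberate.
		fmt.Fprintln(os.Stdout, "best-effort message") //nolint:errcheck

		// The alternative errcheck accepts: consume the error explicitly.
		if _, err := fmt.Fprintln(os.Stdout, "checked message"); err != nil {
			log.Fatal(err)
		}
	}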

View File

@@ -42,7 +42,7 @@ var (
 */
 
 // MockV0Provider implements the virtual-kubelet provider interface and stores pods in memory.
-type MockV0Provider struct {
+type MockV0Provider struct { //nolint:golint
 	nodeName        string
 	operatingSystem string
 	internalIP      string
@@ -54,12 +54,12 @@ type MockV0Provider struct {
 }
 
 // MockProvider is like MockV0Provider, but implements the PodNotifier interface
-type MockProvider struct {
+type MockProvider struct { //nolint:golint
 	*MockV0Provider
 }
 
 // MockConfig contains a mock virtual-kubelet's configurable parameters.
-type MockConfig struct {
+type MockConfig struct { //nolint:golint
 	CPU    string `json:"cpu,omitempty"`
 	Memory string `json:"memory,omitempty"`
 	Pods   string `json:"pods,omitempty"`
@@ -355,7 +355,7 @@ func (p *MockV0Provider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
 }
 
 func (p *MockV0Provider) ConfigureNode(ctx context.Context, n *v1.Node) {
-	ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode")
+	ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") //nolint:ineffassign
 	defer span.End()
 
 	n.Status.Capacity = p.capacity()
@@ -453,7 +453,8 @@ func (p *MockV0Provider) nodeDaemonEndpoints() v1.NodeDaemonEndpoints {
 // GetStatsSummary returns dummy stats for all pods known by this provider.
 func (p *MockV0Provider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
-	ctx, span := trace.StartSpan(ctx, "GetStatsSummary")
+	var span trace.Span
+	ctx, span = trace.StartSpan(ctx, "GetStatsSummary") //nolint: ineffassign
 	defer span.End()
 
 	// Grab the current timestamp so we can report it as the time the stats were generated.
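Both ineffassign suppressions above follow from the same pattern: StartSpan returns a derived context, but when nothing reads that context before the function returns, the assignment is ineffectual. A minimal sketch with a stand-in tracer (the Span type and startSpan signature are assumptions, mirroring the shape of trace.StartSpan used in the diff):

	package main

	import "context"

	type spanKey struct{}

	// Span and startSpan stand in for the tracing API used above.
	type Span struct{}

	func (Span) End() {}

	func startSpan(ctx context.Context, name string) (context.Context, Span) {
		return context.WithValue(ctx, spanKey{}, name), Span{}
	}

	func configureNode(ctx context.Context) {
		// ctx is reassigned here, but the new value is never read before the
		// function returns, so ineffassign reports an ineffectual assignment.
		// The suppression keeps the conventional "ctx, span := StartSpan(...)"
		// shape instead of discarding the returned context.
		ctx, span := startSpan(ctx, "configureNode") //nolint:ineffassign
		defer span.End()
	}

	func main() {
		configureNode(context.Background())
	}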

View File

@@ -6,7 +6,7 @@ import (
 )
 
 func registerMock(s *provider.Store) {
-	s.Register("mock", func(cfg provider.InitConfig) (provider.Provider, error) {
+	s.Register("mock", func(cfg provider.InitConfig) (provider.Provider, error) { //nolint:errcheck
 		return mock.NewMockProvider(
 			cfg.ConfigPath,
 			cfg.NodeName,
@@ -16,7 +16,7 @@ func registerMock(s *provider.Store) {
 		)
 	})
 
-	s.Register("mockV0", func(cfg provider.InitConfig) (provider.Provider, error) {
+	s.Register("mockV0", func(cfg provider.InitConfig) (provider.Provider, error) { //nolint:errcheck
 		return mock.NewMockProvider(
 			cfg.ConfigPath,
 			cfg.NodeName,

View File

@@ -17,13 +17,13 @@ package manager_test
 import (
 	"testing"
 
+	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
+	testutil "github.com/virtual-kubelet/virtual-kubelet/internal/test/util"
+	"gotest.tools/assert"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	corev1listers "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"
-
-	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
-	testutil "github.com/virtual-kubelet/virtual-kubelet/internal/test/util"
 )
// TestGetPods verifies that the resource manager acts as a passthrough to a pod lister. // TestGetPods verifies that the resource manager acts as a passthrough to a pod lister.
@@ -38,7 +38,7 @@ func TestGetPods(t *testing.T) {
 	// Create a pod lister that will list the pods defined above.
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 	for _, pod := range lsPods {
-		indexer.Add(pod)
+		assert.NilError(t, indexer.Add(pod))
 	}
 	podLister := corev1listers.NewPodLister(indexer)
@@ -67,7 +67,7 @@ func TestGetSecret(t *testing.T) {
 	// Create a secret lister that will list the secrets defined above.
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 	for _, secret := range lsSecrets {
-		indexer.Add(secret)
+		assert.NilError(t, indexer.Add(secret))
 	}
 	secretLister := corev1listers.NewSecretLister(indexer)
@@ -106,7 +106,7 @@ func TestGetConfigMap(t *testing.T) {
 	// Create a config map lister that will list the config maps defined above.
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 	for _, secret := range lsConfigMaps {
-		indexer.Add(secret)
+		assert.NilError(t, indexer.Add(secret))
 	}
 	configMapLister := corev1listers.NewConfigMapLister(indexer)
@@ -145,7 +145,7 @@ func TestListServices(t *testing.T) {
 	// Create a pod lister that will list the pods defined above.
 	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
 	for _, service := range lsServices {
-		indexer.Add(service)
+		assert.NilError(t, indexer.Add(service))
 	}
 	serviceLister := corev1listers.NewServiceLister(indexer)
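Here the fix consumes the error instead of suppressing the warning: cache.Indexer.Add returns an error, and routing it through gotest.tools' assert.NilError satisfies errcheck while also failing the test if fixture setup ever breaks. A reduced sketch of the idiom (addToIndex is an illustrative stand-in, not code from this repository):

	package manager_test

	import (
		"testing"

		"gotest.tools/assert"
	)

	// addToIndex stands in for cache.Indexer.Add, which returns an error that
	// errcheck requires callers to handle.
	func addToIndex(obj interface{}) error {
		return nil
	}

	func TestFixtureSetup(t *testing.T) {
		// assert.NilError fails the test immediately if err is non-nil, so
		// setup errors surface instead of silently producing an empty lister.
		assert.NilError(t, addToIndex("pod"))
	}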

View File

@@ -33,7 +33,7 @@ func handleError(f handlerFunc) http.HandlerFunc {
 		code := httpStatusCode(err)
 		w.WriteHeader(code)
-		io.WriteString(w, err.Error())
+		io.WriteString(w, err.Error()) //nolint:errcheck
 		logger := log.G(req.Context()).WithError(err).WithField("httpStatusCode", code)
 		if code >= 500 {

View File

@@ -28,7 +28,7 @@ type PodListerFunc func(context.Context) ([]*v1.Pod, error)
 func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc {
 	scheme := runtime.NewScheme()
-	v1.SchemeBuilder.AddToScheme(scheme)
+	v1.SchemeBuilder.AddToScheme(scheme) //nolint:errcheck
 	codecs := serializer.NewCodecFactory(scheme)
 
 	return handleError(func(w http.ResponseWriter, req *http.Request) error {

View File

@@ -23,11 +23,11 @@ import (
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	ktesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	watchutils "k8s.io/client-go/tools/watch"
 	"k8s.io/klog"
-
-	ktesting "k8s.io/client-go/testing"
 )
 
 var (

View File

@@ -58,7 +58,7 @@ func (w *waitableInt) until(ctx context.Context, f func(int) bool) error {
 func (w *waitableInt) increment() {
 	w.cond.L.Lock()
 	defer w.cond.L.Unlock()
-	w.val += 1
+	w.val++
 	w.cond.Broadcast()
 }

View File

@@ -38,7 +38,7 @@ import (
 //
 // Note: Implementers can choose to manage a node themselves, in which case
 // it is not needed to provide an implementation for this interface.
-type NodeProvider interface {
+type NodeProvider interface { //nolint:golint
 	// Ping checks if the node is still active.
 	// This is intended to be lightweight as it will be called periodically as a
 	// heartbeat to keep the node marked as ready in Kubernetes.
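This //nolint:golint is presumably for golint's name-stutter check: an exported name that repeats its package name reads awkwardly at call sites. A generic sketch of the rule (the package name and method signature here are assumptions for illustration):

	// Package node is assumed for illustration.
	package node

	import "context"

	// golint suggests renaming this to Provider so callers write node.Provider
	// rather than the stuttering node.NodeProvider; since renaming would break
	// the exported API, the warning is suppressed instead.
	type NodeProvider interface { //nolint:golint
		// Ping checks if the node is still active.
		Ping(ctx context.Context) error
	}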

View File

@@ -71,80 +71,25 @@ func TestPodsEqual(t *testing.T) {
 		},
 	}
 
-	p2 := &corev1.Pod{
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{
-				corev1.Container{
-					Name:  "nginx",
-					Image: "nginx:1.15.12-perl",
-					Ports: []corev1.ContainerPort{
-						corev1.ContainerPort{
-							ContainerPort: 443,
-							Protocol:      "tcp",
-						},
-					},
-				},
-			},
-		},
-	}
+	p2 := p1.DeepCopy()
 
 	assert.Assert(t, podsEqual(p1, p2))
 }
 
 func TestPodsDifferent(t *testing.T) {
 	p1 := &corev1.Pod{
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{
-				corev1.Container{
-					Name:  "nginx",
-					Image: "nginx:1.15.12",
-					Ports: []corev1.ContainerPort{
-						corev1.ContainerPort{
-							ContainerPort: 443,
-							Protocol:      "tcp",
-						},
-					},
-				},
-			},
-		},
+		Spec: newPodSpec(),
 	}
-	p2 := &corev1.Pod{
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{
-				corev1.Container{
-					Name:  "nginx",
-					Image: "nginx:1.15.12-perl",
-					Ports: []corev1.ContainerPort{
-						corev1.ContainerPort{
-							ContainerPort: 443,
-							Protocol:      "tcp",
-						},
-					},
-				},
-			},
-		},
-	}
+	p2 := p1.DeepCopy()
+	p2.Spec.Containers[0].Image = "nginx:1.15.12-perl"
 
 	assert.Assert(t, !podsEqual(p1, p2))
 }
 
 func TestPodsDifferentIgnoreValue(t *testing.T) {
 	p1 := &corev1.Pod{
-		Spec: corev1.PodSpec{
-			Containers: []corev1.Container{
-				corev1.Container{
-					Name:  "nginx",
-					Image: "nginx:1.15.12",
-					Ports: []corev1.ContainerPort{
-						corev1.ContainerPort{
-							ContainerPort: 443,
-							Protocol:      "tcp",
-						},
-					},
-				},
-			},
-		},
+		Spec: newPodSpec(),
 	}
 
 	p2 := p1.DeepCopy()
@@ -157,22 +102,9 @@ func TestPodCreateNewPod(t *testing.T) {
 	svr := newTestController()
 
 	pod := &corev1.Pod{}
-	pod.ObjectMeta.Namespace = "default"
-	pod.ObjectMeta.Name = "nginx"
-	pod.Spec = corev1.PodSpec{
-		Containers: []corev1.Container{
-			corev1.Container{
-				Name:  "nginx",
-				Image: "nginx:1.15.12",
-				Ports: []corev1.ContainerPort{
-					corev1.ContainerPort{
-						ContainerPort: 443,
-						Protocol:      "tcp",
-					},
-				},
-			},
-		},
-	}
+	pod.ObjectMeta.Namespace = "default" //nolint:goconst
+	pod.ObjectMeta.Name = "nginx"        //nolint:goconst
+	pod.Spec = newPodSpec()
 
 	err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
@@ -188,43 +120,15 @@ func TestPodUpdateExisting(t *testing.T) {
 	pod := &corev1.Pod{}
 	pod.ObjectMeta.Namespace = "default"
 	pod.ObjectMeta.Name = "nginx"
-	pod.Spec = corev1.PodSpec{
-		Containers: []corev1.Container{
-			corev1.Container{
-				Name:  "nginx",
-				Image: "nginx:1.15.12",
-				Ports: []corev1.ContainerPort{
-					corev1.ContainerPort{
-						ContainerPort: 443,
-						Protocol:      "tcp",
-					},
-				},
-			},
-		},
-	}
+	pod.Spec = newPodSpec()
 
 	err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
 	assert.Check(t, is.Nil(err))
 	assert.Check(t, is.Equal(svr.mock.creates.read(), 1))
 	assert.Check(t, is.Equal(svr.mock.updates.read(), 0))
 
-	pod2 := &corev1.Pod{}
-	pod2.ObjectMeta.Namespace = "default"
-	pod2.ObjectMeta.Name = "nginx"
-	pod2.Spec = corev1.PodSpec{
-		Containers: []corev1.Container{
-			corev1.Container{
-				Name:  "nginx",
-				Image: "nginx:1.15.12-perl",
-				Ports: []corev1.ContainerPort{
-					corev1.ContainerPort{
-						ContainerPort: 443,
-						Protocol:      "tcp",
-					},
-				},
-			},
-		},
-	}
+	pod2 := pod.DeepCopy()
+	pod2.Spec.Containers[0].Image = "nginx:1.15.12-perl"
 
 	err = svr.createOrUpdatePod(context.Background(), pod2.DeepCopy())
 	assert.Check(t, is.Nil(err))
@@ -240,20 +144,7 @@ func TestPodNoSpecChange(t *testing.T) {
 	pod := &corev1.Pod{}
 	pod.ObjectMeta.Namespace = "default"
 	pod.ObjectMeta.Name = "nginx"
-	pod.Spec = corev1.PodSpec{
-		Containers: []corev1.Container{
-			corev1.Container{
-				Name:  "nginx",
-				Image: "nginx:1.15.12",
-				Ports: []corev1.ContainerPort{
-					corev1.ContainerPort{
-						ContainerPort: 443,
-						Protocol:      "tcp",
-					},
-				},
-			},
-		},
-	}
+	pod.Spec = newPodSpec()
 
 	err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
 	assert.Check(t, is.Nil(err))
@@ -288,14 +179,7 @@ func TestPodDelete(t *testing.T) {
 	pod := &corev1.Pod{}
 	pod.ObjectMeta.Namespace = "default"
 	pod.ObjectMeta.Name = "nginx"
-	pod.Spec = corev1.PodSpec{
-		Containers: []corev1.Container{
-			corev1.Container{
-				Name:  "nginx",
-				Image: "nginx:1.15.12",
-			},
-		},
-	}
+	pod.Spec = newPodSpec()
 
 	pc := c.client.CoreV1().Pods("default")
@@ -327,3 +211,20 @@ func TestPodDelete(t *testing.T) {
 		})
 	}
 }
+
+func newPodSpec() corev1.PodSpec {
+	return corev1.PodSpec{
+		Containers: []corev1.Container{
+			corev1.Container{
+				Name:  "nginx",
+				Image: "nginx:1.15.12",
+				Ports: []corev1.ContainerPort{
+					corev1.ContainerPort{
+						ContainerPort: 443,
+						Protocol:      "tcp",
+					},
+				},
+			},
+		},
+	}
+}
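The test refactor above leans on two things: a shared newPodSpec() fixture and DeepCopy-then-mutate for variants. DeepCopy matters because a plain pointer or struct copy would alias the underlying Containers slice. A small self-contained illustration:

	package main

	import (
		"fmt"

		corev1 "k8s.io/api/core/v1"
	)

	func main() {
		p1 := &corev1.Pod{Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Name: "nginx", Image: "nginx:1.15.12"}},
		}}

		// DeepCopy yields a fully independent object; mutating p2's container
		// cannot leak back into p1, which is exactly what the equality tests
		// above rely on.
		p2 := p1.DeepCopy()
		p2.Spec.Containers[0].Image = "nginx:1.15.12-perl"

		fmt.Println(p1.Spec.Containers[0].Image) // nginx:1.15.12
		fmt.Println(p2.Spec.Containers[0].Image) // nginx:1.15.12-perl
	}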

View File

@@ -262,20 +262,20 @@ func (pc *PodController) Ready() <-chan struct{} {
 }
 
 // runWorker is a long-running function that will continually call the processNextWorkItem function in order to read and process an item on the work queue.
-func (pc *PodController) runWorker(ctx context.Context, workerId string, q workqueue.RateLimitingInterface) {
-	for pc.processNextWorkItem(ctx, workerId, q) {
+func (pc *PodController) runWorker(ctx context.Context, workerID string, q workqueue.RateLimitingInterface) {
+	for pc.processNextWorkItem(ctx, workerID, q) {
 	}
 }
 
 // processNextWorkItem will read a single work item off the work queue and attempt to process it,by calling the syncHandler.
-func (pc *PodController) processNextWorkItem(ctx context.Context, workerId string, q workqueue.RateLimitingInterface) bool {
+func (pc *PodController) processNextWorkItem(ctx context.Context, workerID string, q workqueue.RateLimitingInterface) bool {
 	// We create a span only after popping from the queue so that we can get an adequate picture of how long it took to process the item.
 	ctx, span := trace.StartSpan(ctx, "processNextWorkItem")
 	defer span.End()
 
 	// Add the ID of the current worker as an attribute to the current span.
-	ctx = span.WithField(ctx, "workerId", workerId)
+	ctx = span.WithField(ctx, "workerId", workerID)
 
 	return handleQueueItem(ctx, q, pc.syncHandler)
 }