Add gocritic

This also fixes the issues reported by gocritic.
@@ -15,6 +15,7 @@ linters:
   - deadcode
   - misspell
   - nolintlint
+  - gocritic
 
 issues:
   exclude-use-default: false
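For context: gocritic bundles a large set of style and correctness checks on top of the linters already enabled above, and the elseif/appendAssign fixes further down appear to come from it. A minimal, hypothetical illustration of the kind of diagnostic it emits (its sloppyLen check, assuming default settings):

```go
package main

import "fmt"

func main() {
	data := []byte("payload")
	// gocritic (sloppyLen): "len(data) >= 0 is always true" — the guard is dead.
	if len(data) >= 0 {
		fmt.Println("always printed")
	}
	// The check the author presumably intended:
	if len(data) > 0 {
		fmt.Println("non-empty")
	}
}
```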
@@ -54,7 +54,7 @@ type MockProvider struct {
 }
 
 // MockConfig contains a mock virtual-kubelet's configurable parameters.
-type MockConfig struct { //nolint:golint
+type MockConfig struct {
 	CPU    string `json:"cpu,omitempty"`
 	Memory string `json:"memory,omitempty"`
 	Pods   string `json:"pods,omitempty"`
@@ -46,7 +46,7 @@ const (
 //
 // Note: Implementers can choose to manage a node themselves, in which case
 // it is not needed to provide an implementation for this interface.
-type NodeProvider interface { //nolint:golint
+type NodeProvider interface {
 	// Ping checks if the node is still active.
 	// This is intended to be lightweight as it will be called periodically as a
 	// heartbeat to keep the node marked as ready in Kubernetes.
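The two hunks above drop `//nolint:golint` directives, presumably because they no longer suppress anything; with nolintlint enabled (see the linter list above), a dead directive is itself reported. A hypothetical sketch of that interaction, assuming nolintlint's default unused-directive reporting:

```go
package main

// Hypothetical: if golint raises nothing on this line, the directive is dead,
// and nolintlint reports the //nolint comment itself as unneeded.
type Config struct{} //nolint:golint

func main() {}
```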
@@ -24,17 +24,7 @@ func ClientsetFromEnv(kubeConfigPath string) (*kubernetes.Clientset, error) {
 	)
 
 	if kubeConfigPath != "" {
-		_, err = os.Stat(kubeConfigPath)
-		if err == nil {
-			config, err = clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
-				&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfigPath},
-				&clientcmd.ConfigOverrides{},
-			).ClientConfig()
-		} else if os.IsNotExist(err) {
-			config, err = rest.InClusterConfig()
-		} else {
-			return nil, err
-		}
+		config, err = clientsetFromEnvKubeConfigPath(kubeConfigPath)
 	} else {
 		config, err = rest.InClusterConfig()
 	}
@@ -46,6 +36,20 @@ func ClientsetFromEnv(kubeConfigPath string) (*kubernetes.Clientset, error) {
 	return kubernetes.NewForConfig(config)
 }
 
+func clientsetFromEnvKubeConfigPath(kubeConfigPath string) (*rest.Config, error) {
+	_, err := os.Stat(kubeConfigPath)
+	if os.IsNotExist(err) {
+		return rest.InClusterConfig()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfigPath},
+		&clientcmd.ConfigOverrides{},
+	).ClientConfig()
+}
+
 // PodInformerFilter is a filter that you should use when creating a pod informer for use with the pod controller.
 func PodInformerFilter(node string) kubeinformers.SharedInformerOption {
 	return kubeinformers.WithTweakListOptions(func(options *metav1.ListOptions) {
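The refactor keeps `ClientsetFromEnv`'s behavior (explicit kubeconfig if the file exists, otherwise in-cluster config) while moving the branching into a helper. A hedged usage sketch; the `nodeutil` package name and import path are assumptions, not taken from this diff:

```go
package main

import (
	"log"
	"os"

	// Assumed import path for the package containing ClientsetFromEnv.
	"github.com/virtual-kubelet/virtual-kubelet/node/nodeutil"
)

func main() {
	// An empty KUBECONFIG (or a missing file) falls back to in-cluster config.
	clientset, err := nodeutil.ClientsetFromEnv(os.Getenv("KUBECONFIG"))
	if err != nil {
		log.Fatal(err)
	}
	_ = clientset // use the typed clientset, e.g. clientset.CoreV1().Pods("")
}
```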
@@ -303,10 +303,9 @@ func (pc *PodController) Run(ctx context.Context, podSyncWorkers int) (retErr er
 			// At this point we know that something in .metadata or .spec has changed, so we must proceed to sync the pod.
 			if key, err := cache.MetaNamespaceKeyFunc(newPod); err != nil {
 				log.G(ctx).Error(err)
-			} else {
-				if podShouldEnqueue(oldPod, newPod) {
-					pc.k8sQ.AddRateLimited(key)
-				}
+			} else if podShouldEnqueue(oldPod, newPod) {
+				pc.k8sQ.AddRateLimited(key)
 			}
 		},
 		DeleteFunc: func(pod interface{}) {
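This is the shape gocritic's elseif check targets: an else block whose only statement is an if. Collapsing it to `else if` drops one nesting level without changing control flow. A standalone before/after sketch with hypothetical names:

```go
package main

import "fmt"

func enqueue(err error, ready bool) {
	// Before: the nested if is the sole statement of the else block.
	if err != nil {
		fmt.Println(err)
	} else {
		if ready {
			fmt.Println("enqueue")
		}
	}

	// After: identical behavior, one level flatter.
	if err != nil {
		fmt.Println(err)
	} else if ready {
		fmt.Println("enqueue")
	}
}

func main() { enqueue(nil, true) }
```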
@@ -213,9 +213,9 @@ func (l *logger) WithError(err error) log.Logger {
 
 	var a []octrace.Attribute
 	if l.s.IsRecordingEvents() {
-		a = make([]octrace.Attribute, len(l.a), len(l.a)+1)
+		a = make([]octrace.Attribute, len(l.a)+1)
 		copy(a, l.a)
-		a = append(l.a, makeAttribute("err", err))
+		a[len(a)-1] = makeAttribute("err", err)
 	}
 
 	return &logger{s: l.s, l: log, a: a}
@@ -227,9 +227,9 @@ func (l *logger) WithField(k string, value interface{}) log.Logger {
 	var a []octrace.Attribute
 
 	if l.s.IsRecordingEvents() {
-		a = make([]octrace.Attribute, len(l.a), len(l.a)+1)
+		a = make([]octrace.Attribute, len(l.a)+1)
 		copy(a, l.a)
-		a = append(a, makeAttribute(k, value))
+		a[len(a)-1] = makeAttribute(k, value)
 	}
 
 	return &logger{s: l.s, a: a, l: log}
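The `WithError` hunk looks like gocritic's appendAssign diagnostic (the result of append assigned to a slice other than append's first argument), and it was a real bug: `a = append(l.a, ...)` discarded the slice that `copy` had just filled and appended onto `l.a` instead, which can overwrite the shared backing array whenever `l.a` has spare capacity. The `WithField` hunk adopts the same indexed-write shape for consistency. A self-contained sketch of that aliasing hazard, with hypothetical values:

```go
package main

import "fmt"

func main() {
	base := make([]int, 2, 4) // spare capacity, like a parent logger's attributes
	base[0], base[1] = 1, 2

	// Buggy pattern: both appends write into base's backing array at index 2.
	child1 := append(base, 3)
	child2 := append(base, 4)         // silently overwrites child1's last element
	fmt.Println(child1[2], child2[2]) // 4 4 — child1 was corrupted

	// Fixed pattern: allocate exactly, copy, then write the final slot.
	fixed := make([]int, len(base)+1)
	copy(fixed, base)
	fixed[len(fixed)-1] = 3
	fmt.Println(fixed) // [1 2 3], independent of base
}
```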