Enable all linters by default
This removes the `disable-all: true` directive from .golangci.yml, so golangci-lint's default linters run, and fixes the bugs and lint issues they expose.
@@ -3,7 +3,6 @@ linter-settings:
     line-length: 200
 
 linters:
-  disable-all: true
   enable:
   - errcheck
  - govet
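With `disable-all: true` gone, golangci-lint falls back to its default linter set, which includes staticcheck, gosimple, ineffassign, and unused alongside the explicitly enabled errcheck and govet. The hunks below are the fixes and `nolint` suppressions those newly active linters demand.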
@@ -47,7 +47,6 @@ func NewCommand(s *provider.Store) *cobra.Command {
 				}
 				fmt.Fprintln(cmd.OutOrStdout(), args[0])
 			}
-			return
 		},
 	}
 }
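The dropped `return` is what gosimple's S1023 check flags: a bare `return` as the final statement of a function with no result values does nothing. A minimal sketch of the pattern, using a hypothetical function:

```go
package main

import "fmt"

// printArg has no result values, so a trailing bare return is dead weight.
func printArg(arg string) {
	fmt.Println(arg)
	return // S1023: redundant return statement
}

func main() {
	printArg("example")
}
```

The same cleanup appears again in `deleteDanglingPods` further down.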
@@ -40,7 +40,7 @@ func NewJaegerExporter(opts TracingExporterOptions) (trace.Exporter, error) {
 		},
 	}
 
-	if jOpts.Endpoint == "" && jOpts.AgentEndpoint == "" {
+	if jOpts.Endpoint == "" && jOpts.AgentEndpoint == "" { // nolint:staticcheck
 		return nil, errors.New("Must specify either JAEGER_ENDPOINT or JAEGER_AGENT_ENDPOINT")
 	}
 
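The suppression on the `if` line is presumably for staticcheck's SA1019 (use of a deprecated identifier), since the OpenCensus Jaeger exporter deprecated `Endpoint` in favor of `CollectorEndpoint`, and the check fires on any read of a deprecated field. A sketch of the shape with stand-in types:

```go
package main

import "fmt"

type jaegerOptions struct {
	// Deprecated: use CollectorEndpoint instead.
	Endpoint      string
	AgentEndpoint string
}

func validate(jOpts jaegerOptions) error {
	// Reading a deprecated field trips SA1019; the directive keeps the
	// existing behavior while satisfying the linter.
	if jOpts.Endpoint == "" && jOpts.AgentEndpoint == "" { // nolint:staticcheck
		return fmt.Errorf("must specify either JAEGER_ENDPOINT or JAEGER_AGENT_ENDPOINT")
	}
	return nil
}

func main() {
	fmt.Println(validate(jaegerOptions{}))
}
```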
@@ -329,7 +329,7 @@ func (p *MockProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
 }
 
 func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) {
-	ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") //nolint:ineffassign
+	ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") // nolint:staticcheck,ineffassign
 	defer span.End()
 
 	n.Status.Capacity = p.capacity()
@@ -429,7 +429,7 @@ func (p *MockProvider) nodeDaemonEndpoints() v1.NodeDaemonEndpoints {
 // GetStatsSummary returns dummy stats for all pods known by this provider.
 func (p *MockProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
 	var span trace.Span
-	ctx, span = trace.StartSpan(ctx, "GetStatsSummary") //nolint: ineffassign
+	ctx, span = trace.StartSpan(ctx, "GetStatsSummary") //nolint: ineffassign,staticcheck
 	defer span.End()
 
 	// Grab the current timestamp so we can report it as the time the stats were generated.
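Both MockProvider changes deal with the same pattern: `ctx` is reassigned from `trace.StartSpan` but never read again before the function returns, which ineffassign and staticcheck's SA4006 both report. The directives keep the idiomatic `ctx, span := trace.StartSpan(...)` shape instead of discarding the returned context. A self-contained sketch, with `startSpan` standing in for the tracing API:

```go
package main

import (
	"context"
	"fmt"
)

// startSpan stands in for trace.StartSpan: it returns a derived context
// and a span handle.
func startSpan(ctx context.Context, name string) (context.Context, string) {
	return ctx, name
}

func configureNode(ctx context.Context) {
	// ctx is overwritten but never read afterwards, so ineffassign and
	// SA4006 flag the assignment; the nolint keeps the conventional shape.
	ctx, span := startSpan(ctx, "mock.ConfigureNode") // nolint:staticcheck,ineffassign
	defer fmt.Println("ended", span)
}

func main() {
	configureNode(context.Background())
}
```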
@@ -108,7 +108,7 @@ func populateContainerEnvironment(ctx context.Context, pod *corev1.Pod, containe
 	// https://github.com/kubernetes/kubernetes/blob/v1.13.1/pkg/kubelet/kubelet_pods.go#L557-L558
 	container.EnvFrom = []corev1.EnvFromSource{}
 
-	res := make([]corev1.EnvVar, 0)
+	res := make([]corev1.EnvVar, 0, len(tmpEnv))
 
 	for key, val := range tmpEnv {
 		res = append(res, corev1.EnvVar{
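Passing the final element count as capacity is the kind of change the prealloc linter suggests when the length is known before the loop: `append` then fills existing capacity instead of growing the backing array. A small sketch:

```go
package main

import "fmt"

func main() {
	tmpEnv := map[string]string{"HOME": "/root", "PATH": "/usr/bin"}

	// The number of entries is known up front, so size the capacity once
	// and let append fill it without reallocating.
	res := make([]string, 0, len(tmpEnv))
	for key, val := range tmpEnv {
		res = append(res, key+"="+val)
	}
	fmt.Println(len(res), cap(res))
}
```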
@@ -171,7 +171,7 @@ func getServiceEnvVarMap(rm *manager.ResourceManager, ns string, enableServiceLi
 // makeEnvironmentMapBasedOnEnvFrom returns a map representing the resolved environment of the specified container after being populated from the entries in the ".envFrom" field.
 func makeEnvironmentMapBasedOnEnvFrom(ctx context.Context, pod *corev1.Pod, container *corev1.Container, rm *manager.ResourceManager, recorder record.EventRecorder) (map[string]string, error) {
 	// Create a map to hold the resulting environment.
-	res := make(map[string]string, 0)
+	res := make(map[string]string)
 	// Iterate over "envFrom" references in order to populate the environment.
 loop:
 	for _, envFrom := range container.EnvFrom {
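Unlike the slice case above, a size hint of 0 on a map conveys nothing, and staticcheck's S1019 asks for the hint to be dropped:

```go
package main

import "fmt"

func main() {
	// S1019: make(map[string]string, 0) is equivalent to
	// make(map[string]string); the zero hint is meaningless for maps.
	res := make(map[string]string)
	res["FOO"] = "bar"
	fmt.Println(res)
}
```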
@@ -250,13 +250,13 @@ func TestUpdateNodeStatus(t *testing.T) {
 	nodes := testclient.NewSimpleClientset().CoreV1().Nodes()
 
 	ctx := context.Background()
-	updated, err := updateNodeStatus(ctx, nodes, n.DeepCopy())
+	_, err := updateNodeStatus(ctx, nodes, n.DeepCopy())
 	assert.Equal(t, errors.IsNotFound(err), true, err)
 
 	_, err = nodes.Create(ctx, n, metav1.CreateOptions{})
 	assert.NilError(t, err)
 
-	updated, err = updateNodeStatus(ctx, nodes, n.DeepCopy())
+	updated, err := updateNodeStatus(ctx, nodes, n.DeepCopy())
 	assert.NilError(t, err)
 
 	assert.NilError(t, err)
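In the first `updateNodeStatus` call only the error is inspected, so binding the result to `updated` was an ineffectual assignment (ineffassign/SA4006). Discarding it with `_` lets the later call, whose result is actually used, own the `:=` declaration. The pattern in isolation:

```go
package main

import (
	"errors"
	"fmt"
)

func update(exists bool) (string, error) {
	if !exists {
		return "", errors.New("not found")
	}
	return "updated-node", nil
}

func main() {
	// First call: only the error is checked, so the value is discarded
	// rather than parked in a variable that is never read.
	_, err := update(false)
	fmt.Println("expected failure:", err)

	// Second call: the first real use of the value, so it owns := here.
	updated, err := update(true)
	fmt.Println(updated, err)
}
```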
@@ -382,16 +382,11 @@ func TestPingAfterStatusUpdate(t *testing.T) {
 	}
 
 	notifyTimer := time.After(interval * time.Duration(10))
-	select {
-	case <-notifyTimer:
-		testP.triggerStatusUpdate(testNodeCopy)
-	}
+	<-notifyTimer
+	testP.triggerStatusUpdate(testNodeCopy)
 
 	endTimer := time.After(interval * time.Duration(10))
-	select {
-	case <-endTimer:
-		break
-	}
+	<-endTimer
 
 	testP.maxPingIntervalLock.Lock()
 	defer testP.maxPingIntervalLock.Unlock()
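A `select` with a single case is equivalent to the plain channel operation, which is gosimple's S1000; the `case <-endTimer: break` form was doubly redundant, since `break` out of a `select` is a no-op anyway. Sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	notifyTimer := time.After(50 * time.Millisecond)

	// S1000: a one-case select adds nothing over a direct receive.
	<-notifyTimer
	fmt.Println("timer fired")
}
```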
@@ -445,10 +440,7 @@ func TestBeforeAnnotationsPreserved(t *testing.T) {
 
 	t.Log("Waiting for node to exist")
 	assert.NilError(t, <-waitForEvent(ctx, nr, func(e watch.Event) bool {
-		if e.Object == nil {
-			return false
-		}
-		return true
+		return e.Object != nil
 	}))
 
 	testP.notifyNodeStatus(&corev1.Node{
@@ -519,10 +511,7 @@ func TestManualConditionsPreserved(t *testing.T) {
 			return false
 		}
 		receivedNode := e.Object.(*corev1.Node)
-		if len(receivedNode.Status.Conditions) != 0 {
-			return false
-		}
-		return true
+		return len(receivedNode.Status.Conditions) == 0
 	}))
 
 	newNode, err := nodes.Get(ctx, testNodeCopy.Name, emptyGetOptions)
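Both watch-predicate changes apply gosimple's S1008: an `if` that returns a fixed boolean followed by a `return` of the opposite value collapses into returning the condition itself, negated if needed. For example:

```go
package main

import "fmt"

// hasNoConditions reports whether the node-like value carries no conditions.
func hasNoConditions(conditions []string) bool {
	// S1008: instead of
	//   if len(conditions) != 0 { return false }
	//   return true
	// return the (negated) condition directly.
	return len(conditions) == 0
}

func main() {
	fmt.Println(hasNoConditions(nil), hasNoConditions([]string{"Ready"}))
}
```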
@@ -629,7 +629,6 @@ func (pc *PodController) deleteDanglingPods(ctx context.Context, threadiness int
 
 	// Wait for all pods to be deleted.
 	wg.Wait()
-	return
 }
 
 // loggablePodName returns the "namespace/name" key for the specified pod.
@@ -15,13 +15,8 @@
 package opencensus
 
 import (
-	"testing"
-
 	"github.com/virtual-kubelet/virtual-kubelet/trace"
 )
 
-func TestTracerImplementsTracer(t *testing.T) {
-	// ensure that Adapter implements trace.Tracer
-	if tt := trace.Tracer(Adapter{}); tt == nil {
-	}
-}
+// ensure that Adapter implements trace.Tracer
+var _ trace.Tracer = (*Adapter)(nil)
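The deleted test could never fail: a non-nil struct value converted to an interface is never nil. The replacement is the standard compile-time assertion idiom, where `var _ trace.Tracer = (*Adapter)(nil)` breaks the build as soon as `*Adapter` stops satisfying the interface, at zero runtime cost. In miniature:

```go
package main

import "fmt"

type Tracer interface {
	StartSpan(name string) string
}

type Adapter struct{}

func (Adapter) StartSpan(name string) string { return "span:" + name }

// Compile-time proof that *Adapter implements Tracer; if the method set
// drifts, compilation fails instead of a vacuous test passing.
var _ Tracer = (*Adapter)(nil)

func main() {
	fmt.Println(Adapter{}.StartSpan("demo"))
}
```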