Merge branch 'master' into feature/dependabot

Brian Goff authored on 2022-08-31 11:30:14 -07:00; committed by GitHub.
31 changed files with 1149 additions and 257 deletions

View File

@@ -1,167 +0,0 @@
version: 2.0
jobs:
validate:
resource_class: xlarge
docker:
- image: circleci/golang:1.15
environment:
GO111MODULE: "on"
GOPROXY: https://proxy.golang.org
working_directory: /go/src/github.com/virtual-kubelet/virtual-kubelet
steps:
- checkout
- restore_cache:
keys:
- validate-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
- run:
name: go vet
command: V=1 CI=1 make vet
- run:
name: Lint
command: make lint
- run:
name: Dependencies
command: scripts/validate/gomod.sh
- save_cache:
key: validate-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
test:
resource_class: xlarge
docker:
- image: circleci/golang:1.16
environment:
GO111MODULE: "on"
working_directory: /go/src/github.com/virtual-kubelet/virtual-kubelet
steps:
- checkout
- restore_cache:
keys:
- test-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
- run:
name: Build
command: V=1 make build
- run: go install gotest.tools/gotestsum@latest
- run:
name: Tests
environment:
GOTEST: gotestsum -- -timeout=9m
GOTESTSUM_JUNITFILE: output/unit/results.xml
GODEBUG: cgocheck=2
command: |
mkdir -p output/unit
V=1 make test envtest
- save_cache:
key: test-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
- store_test_results:
path: output
e2e:
machine:
image: ubuntu-1604:202010-01
working_directory: /home/circleci/go/src/github.com/virtual-kubelet/virtual-kubelet
environment:
CHANGE_MINIKUBE_NONE_USER: true
GOPATH: /home/circleci/go
KUBECONFIG: /home/circleci/.kube/config
KUBERNETES_VERSION: v1.20.1
MINIKUBE_HOME: /home/circleci
MINIKUBE_VERSION: v1.16.0
MINIKUBE_WANTUPDATENOTIFICATION: false
MINIKUBE_WANTREPORTERRORPROMPT: false
SKAFFOLD_VERSION: v1.17.2
GO111MODULE: "on"
steps:
- checkout
- run:
name: Install kubectl
command: |
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubectl
chmod +x kubectl
sudo mv kubectl /usr/local/bin/
mkdir -p ${HOME}/.kube
touch ${HOME}/.kube/config
- run:
name: Install Skaffold
command: |
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/${SKAFFOLD_VERSION}/skaffold-linux-amd64
chmod +x skaffold
sudo mv skaffold /usr/local/bin/
- run:
name: Install Minikube dependencies
command: |
sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet # systemd unit is disabled
- run:
name: Install Minikube
command: |
curl -Lo minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
chmod +x minikube
sudo mv minikube /usr/local/bin/
- run:
name: Start Minikube
command: |
sudo -E minikube start --vm-driver=none --cpus 2 --memory 2048 --kubernetes-version=${KUBERNETES_VERSION}
- run:
name: Wait for Minikube
command: |
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}';
until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
sleep 1;
done
- run:
name: Watch pods
command: kubectl get pods -o json --watch
background: true
- run:
name: Watch nodes
command: kubectl get nodes -o json --watch
background: true
- restore_cache:
keys:
- e2e-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}-2
- run:
name: Run the end-to-end test suite
environment:
GOTEST: gotestsum --
command: |
mkdir $HOME/.go
export PATH=$HOME/.go/bin:${PATH}
curl -fsSL -o "/tmp/go.tar.gz" "https://dl.google.com/go/go1.16.4.linux-amd64.tar.gz"
tar -C $HOME/.go --strip-components=1 -xzf "/tmp/go.tar.gz"
go version
mkdir -p output/e2e
export GOTESTSUM_JUNITFILE="$(pwd)/output/e2e/results.xml"
export PATH="${GOPATH}/bin:${PATH}"
go install gotest.tools/gotestsum@latest
make e2e
- store_test_results:
path: output
- save_cache:
key: e2e-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}-2
paths:
- "/home/circleci/go/pkg/mod"
- run:
name: Collect logs on failure from vkubelet-mock-0
command: |
kubectl logs vkubelet-mock-0
when: on_fail
workflows:
version: 2
validate_and_test:
jobs:
- validate
- test
- e2e:
requires:
- validate
- test

.github/workflows/ci.yml (new file, 110 lines)
View File

@@ -0,0 +1,110 @@
name: CI
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches: [master]
pull_request:
env:
GO_VERSION: "1.18"
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v2
- uses: golangci/golangci-lint-action@v3
with:
version: v1.48.0
args: --timeout=5m
unit-tests:
name: Unit Tests
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v2
- name: Run Tests
run: make test
env-tests:
name: Envtest Tests
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v2
- name: Run Tests
run: make envtest
e2e:
name: E2E
runs-on: ubuntu-22.04
timeout-minutes: 10
env:
CHANGE_MINIKUBE_NONE_USER: true
KUBERNETES_VERSION: v1.20.1
MINIKUBE_HOME: /home/circleci
MINIKUBE_VERSION: v1.16.0
MINIKUBE_WANTUPDATENOTIFICATION: false
MINIKUBE_WANTREPORTERRORPROMPT: false
SKAFFOLD_VERSION: v1.17.2
GO111MODULE: "on"
steps:
- uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Skaffold
run: |
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/${SKAFFOLD_VERSION}/skaffold-linux-amd64
chmod +x skaffold
sudo mv skaffold /usr/local/bin/
echo /usr/local/bin >> $GITHUB_PATH
- name: Install Minikube dependencies
run: |
sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet # systemd unit is disabled
- name: Install Minikube
run: |
curl -Lo minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
chmod +x minikube
sudo mv minikube /usr/local/bin/
- name: Start Minikube
run: |
sudo -E PATH=$PATH minikube start --vm-driver=none --cpus 2 --memory 2048 --kubernetes-version=${KUBERNETES_VERSION}
- name: Wait for Minikube
run: |
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}';
until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
sleep 1;
done
- name: Run Tests
run: make e2e

View File

@@ -1,13 +1,17 @@
name: "CodeQL"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches: [ master ]
branches: [master]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
branches: [master]
schedule:
- cron: '19 18 * * 3'
- cron: "19 18 * * 3"
jobs:
analyze:
@@ -17,40 +21,40 @@ jobs:
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
language: ["go"]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Checkout repository
uses: actions/checkout@v2
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1

View File

@@ -12,20 +12,26 @@ run:
linters:
enable:
- errcheck
- govet
- ineffassign
- golint
- goconst
- goimports
- unused
- structcheck
- varcheck
- deadcode
- staticcheck
- unconvert
- gofmt
- goimports
- ineffassign
- vet
- unused
- misspell
- nolintlint
- gocritic
- gosec
- exportloopref # Checks for pointers to enclosing loop variables
- tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
linters-settings:
gosec:
excludes:
- G304
issues:
exclude-use-default: false
exclude:
# EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). (is not checked|Errors unhandled)
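
The exportloopref and tenv comments above are easiest to see on a concrete snippet. A minimal sketch of the two patterns these linters flag (hypothetical names, not code from this repository; VK_TAINT_KEY is borrowed only as an example variable):

    package example

    import (
        "os"
        "testing"
    )

    // exportloopref: &v is the address of the loop variable, so every element of
    // out ends up pointing at the same variable, which holds the last value once
    // the loop finishes.
    func collectBad(in []int) []*int {
        out := make([]*int, 0, len(in))
        for _, v := range in {
            out = append(out, &v) // flagged: pointer to enclosing loop variable
        }
        return out
    }

    // tenv: since Go 1.17, t.Setenv restores the previous value when the test
    // ends, so the linter prefers it over a bare os.Setenv inside tests.
    func TestTaintKeyFromEnv(t *testing.T) {
        os.Setenv("VK_TAINT_KEY", "example") // flagged by tenv
        t.Setenv("VK_TAINT_KEY", "example")  // preferred
    }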

View File

@@ -22,7 +22,7 @@ import (
"github.com/pkg/errors"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
klog "k8s.io/klog/v2"
)
type mapVar map[string]string
@@ -61,8 +61,10 @@ func installFlags(flags *pflag.FlagSet, c *Opts) {
flags.StringVar(&c.KubeConfigPath, "kubeconfig", c.KubeConfigPath, "kube config file to use for connecting to the Kubernetes API server")
flags.StringVar(&c.KubeNamespace, "namespace", c.KubeNamespace, "kubernetes namespace (default is 'all')")
/* #nosec */
flags.MarkDeprecated("namespace", "Nodes must watch for pods in all namespaces. This option is now ignored.") //nolint:errcheck
flags.MarkHidden("namespace") //nolint:errcheck
/* #nosec */
flags.MarkHidden("namespace") //nolint:errcheck
flags.StringVar(&c.KubeClusterDomain, "cluster-domain", c.KubeClusterDomain, "kubernetes cluster-domain (default is 'cluster.local')")
flags.StringVar(&c.NodeName, "nodename", c.NodeName, "kubernetes node name")
@@ -74,13 +76,16 @@ func installFlags(flags *pflag.FlagSet, c *Opts) {
flags.StringVar(&c.TaintKey, "taint", c.TaintKey, "Set node taint key")
flags.BoolVar(&c.DisableTaint, "disable-taint", c.DisableTaint, "disable the virtual-kubelet node taint")
/* #nosec */
flags.MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable") //nolint:errcheck
flags.IntVar(&c.PodSyncWorkers, "pod-sync-workers", c.PodSyncWorkers, `set the number of pod synchronization workers`)
flags.BoolVar(&c.EnableNodeLease, "enable-node-lease", c.EnableNodeLease, `use node leases (1.13) for node heartbeats`)
/* #nosec */
flags.MarkDeprecated("enable-node-lease", "leases are always enabled") //nolint:errcheck
flags.MarkHidden("enable-node-lease") //nolint:errcheck
/* #nosec */
flags.MarkHidden("enable-node-lease") //nolint:errcheck
flags.StringSliceVar(&c.TraceExporters, "trace-exporter", c.TraceExporters, fmt.Sprintf("sets the tracing exporter to use, available exporters: %s", AvailableTraceExporters()))
flags.StringVar(&c.TraceConfig.ServiceName, "trace-service-name", c.TraceConfig.ServiceName, "sets the name of the service used to register with the trace exporter")
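
For context on the /* #nosec */ and //nolint:errcheck annotations around these calls: in spf13/pflag, MarkDeprecated prints its message whenever the flag is actually used (and drops the flag from usage output), MarkHidden only hides it from --help, and both return an error solely when the named flag does not exist. A small self-contained sketch (flag name and values hypothetical):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
        ns := flags.String("namespace", "", "kubernetes namespace")

        // Both calls can only fail if "namespace" were not registered above,
        // which is why ignoring the returned error is acceptable here.
        flags.MarkDeprecated("namespace", "this option is now ignored") //nolint:errcheck
        flags.MarkHidden("namespace")                                   //nolint:errcheck

        _ = flags.Parse([]string{"--namespace", "kube-system"})
        fmt.Println(*ns) // prints "kube-system"; a deprecation warning goes to the flag set's output
    }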

View File

@@ -36,7 +36,7 @@ func getTaint(c Opts) (*corev1.Taint, error) {
key = getEnv("VKUBELET_TAINT_KEY", key)
value = getEnv("VKUBELET_TAINT_VALUE", value)
effectEnv := getEnv("VKUBELET_TAINT_EFFECT", string(c.TaintEffect))
effectEnv := getEnv("VKUBELET_TAINT_EFFECT", c.TaintEffect)
var effect corev1.TaintEffect
switch effectEnv {

View File

@@ -15,6 +15,7 @@
package root
import (
"fmt"
"os"
"path/filepath"
"strconv"
@@ -95,6 +96,8 @@ type Opts struct {
Version string
}
const maxInt32 = 1<<31 - 1
// SetDefaultOpts sets default options for unset values on the passed in option struct.
// Fields that are already set will not be modified.
func SetDefaultOpts(c *Opts) error {
@@ -128,6 +131,10 @@ func SetDefaultOpts(c *Opts) error {
if err != nil {
return errors.Wrap(err, "error parsing KUBELET_PORT environment variable")
}
if p > maxInt32 {
return fmt.Errorf("KUBELET_PORT environment variable is too large")
}
/* #nosec */
c.ListenPort = int32(p)
} else {
c.ListenPort = DefaultListenPort
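
The new maxInt32 guard is needed because int32(p) silently truncates out-of-range values (and gosec flags the unchecked conversion). As a sketch of an equivalent way to get the same bound, strconv can enforce the range during parsing (function name hypothetical):

    package main

    import (
        "fmt"
        "strconv"
    )

    // parsePort rejects anything that does not fit in an int32, mirroring the
    // guard added above.
    func parsePort(s string) (int32, error) {
        p, err := strconv.ParseInt(s, 10, 32) // bitSize 32: out-of-range input is an error
        if err != nil {
            return 0, fmt.Errorf("invalid port %q: %w", s, err)
        }
        return int32(p), nil
    }

    func main() {
        for _, s := range []string{"10250", "99999999999"} {
            p, err := parsePort(s)
            fmt.Println(p, err)
        }
    }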

View File

@@ -21,6 +21,7 @@ import (
"os"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/virtual-kubelet/virtual-kubelet/errdefs"
@@ -105,7 +106,8 @@ func setupZpages(ctx context.Context) {
zpages.Handle(mux, "/debug")
go func() {
// This should never terminate; if it does, it will always terminate with an error
e := http.Serve(listener, mux)
srv := &http.Server{Handler: mux, ReadHeaderTimeout: 30 * time.Second}
e := srv.Serve(listener)
if e == http.ErrServerClosed {
return
}
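
The move from http.Serve to an explicit http.Server is what makes room for ReadHeaderTimeout, which bounds how long a client may take to send its request headers (gosec's Slowloris check, G112). A minimal standalone sketch of the same pattern:

    package main

    import (
        "log"
        "net"
        "net/http"
        "time"
    )

    func main() {
        listener, err := net.Listen("tcp", "127.0.0.1:0")
        if err != nil {
            log.Fatal(err)
        }
        mux := http.NewServeMux()
        mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
            w.WriteHeader(http.StatusOK)
        })
        // Wrapping the handler in an http.Server, rather than calling
        // http.Serve(listener, mux), is what allows the timeout to be set.
        srv := &http.Server{Handler: mux, ReadHeaderTimeout: 30 * time.Second}
        if err := srv.Serve(listener); err != nil && err != http.ErrServerClosed {
            log.Fatal(err)
        }
    }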

View File

@@ -19,7 +19,8 @@ import (
"go.opencensus.io/trace"
)
type TracingExporterOptions struct { // nolint: golint
// TracingExporterOptions is the options passed to the tracing exporter init function.
type TracingExporterOptions struct { //nolint: golint
Tags map[string]string
ServiceName string
}

View File

@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !no_jaeger_exporter
// +build !no_jaeger_exporter
package root
import (
"errors"
"fmt"
"os"
"contrib.go.opencensus.io/exporter/jaeger"
@@ -32,7 +32,6 @@ func init() {
// NewJaegerExporter creates a new opencensus tracing exporter.
func NewJaegerExporter(opts TracingExporterOptions) (trace.Exporter, error) {
jOpts := jaeger.Options{
Endpoint: os.Getenv("JAEGER_ENDPOINT"), // deprecated
CollectorEndpoint: os.Getenv("JAEGER_COLLECTOR_ENDPOINT"),
AgentEndpoint: os.Getenv("JAEGER_AGENT_ENDPOINT"),
Username: os.Getenv("JAEGER_USER"),
@@ -42,11 +41,8 @@ func NewJaegerExporter(opts TracingExporterOptions) (trace.Exporter, error) {
},
}
if jOpts.Endpoint != "" && jOpts.CollectorEndpoint == "" { // nolintlint:staticcheck
jOpts.CollectorEndpoint = fmt.Sprintf("%s/api/traces", jOpts.Endpoint) // nolintlint:staticcheck
}
if jOpts.CollectorEndpoint == "" && jOpts.AgentEndpoint == "" { // nolintlint:staticcheck
return nil, errors.New("Must specify either JAEGER_COLLECTOR_ENDPOINT or JAEGER_AGENT_ENDPOINT")
return nil, errors.New("must specify either JAEGER_COLLECTOR_ENDPOINT or JAEGER_AGENT_ENDPOINT")
}
for k, v := range opts.Tags {

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !no_ocagent_exporter
// +build !no_ocagent_exporter
package root

View File

@@ -42,7 +42,7 @@ var (
*/
// MockProvider implements the virtual-kubelet provider interface and stores pods in memory.
type MockProvider struct { // nolint:golint
type MockProvider struct { //nolint:golint
nodeName string
operatingSystem string
internalIP string
@@ -54,7 +54,7 @@ type MockProvider struct { // nolint:golint
}
// MockConfig contains a mock virtual-kubelet's configurable parameters.
type MockConfig struct { // nolint:golint
type MockConfig struct { //nolint:golint
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
Pods string `json:"pods,omitempty"`
@@ -328,8 +328,8 @@ func (p *MockProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
return pods, nil
}
func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { // nolint:golint
ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") // nolint:staticcheck,ineffassign
func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { //nolint:golint
ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") //nolint:staticcheck,ineffassign
defer span.End()
n.Status.Capacity = p.capacity()
@@ -467,10 +467,14 @@ func (p *MockProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, err
for _, container := range pod.Spec.Containers {
// Grab a dummy value to be used as the total CPU usage.
// The value should fit a uint32 in order to avoid overflows later on when computing pod stats.
/* #nosec */
dummyUsageNanoCores := uint64(rand.Uint32())
totalUsageNanoCores += dummyUsageNanoCores
// Create a dummy value to be used as the total RAM usage.
// The value should fit a uint32 in order to avoid overflows later on when computing pod stats.
/* #nosec */
dummyUsageBytes := uint64(rand.Uint32())
totalUsageBytes += dummyUsageBytes
// Append a ContainerStats object containing the dummy stats to the PodStats object.
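
The "should fit a uint32" comments carry the reasoning: per-container samples get summed into pod and node totals, and capping each sample at MaxUint32 leaves ample headroom in the uint64 accumulators. A small sketch of that arithmetic (numbers purely illustrative):

    package main

    import (
        "fmt"
        "math"
        "math/rand"
    )

    func main() {
        // Each sample is at most math.MaxUint32, so even an implausibly large
        // container count keeps the running total far below the uint64 ceiling.
        var total uint64
        const containers = 10000
        for i := 0; i < containers; i++ {
            total += uint64(rand.Uint32())
        }
        worstCase := uint64(containers) * uint64(math.MaxUint32)
        fmt.Printf("total=%d worstCase=%d maxUint64=%d\n", total, worstCase, uint64(math.MaxUint64))
    }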

View File

@@ -13,7 +13,7 @@ type Store struct {
ls map[string]InitFunc
}
func NewStore() *Store { // nolint:golint
func NewStore() *Store { //nolint:golint
return &Store{
ls: make(map[string]InitFunc),
}
@@ -71,4 +71,4 @@ type InitConfig struct {
ResourceManager *manager.ResourceManager
}
type InitFunc func(InitConfig) (Provider, error) // nolint:golint
type InitFunc func(InitConfig) (Provider, error) //nolint:golint

View File

@@ -7,7 +7,7 @@ const (
OperatingSystemWindows = "windows"
)
type OperatingSystems map[string]bool // nolint:golint
type OperatingSystems map[string]bool //nolint:golint
var (
// ValidOperatingSystems defines the group of operating systems
@@ -18,7 +18,7 @@ var (
}
)
func (o OperatingSystems) Names() []string { // nolint:golint
func (o OperatingSystems) Names() []string { //nolint:golint
keys := make([]string, 0, len(o))
for k := range o {
keys = append(keys, k)

View File

@@ -6,6 +6,7 @@ import (
)
func registerMock(s *provider.Store) {
/* #nosec */
s.Register("mock", func(cfg provider.InitConfig) (provider.Provider, error) { //nolint:errcheck
return mock.NewMockProvider(
cfg.ConfigPath,

go.mod (7 changed lines)
View File

@@ -9,7 +9,7 @@ require (
github.com/docker/spdystream v0.0.0-20170912183627-bc6354cbbc29 // indirect
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f // indirect
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 // indirect
github.com/google/go-cmp v0.5.2
github.com/google/go-cmp v0.5.6
github.com/gorilla/mux v1.7.3
github.com/mitchellh/go-homedir v1.1.0
github.com/pkg/errors v0.9.1
@@ -18,8 +18,11 @@ require (
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5
go.opencensus.io v0.22.2
go.opentelemetry.io/otel v1.2.0
go.opentelemetry.io/otel/sdk v1.2.0
go.opentelemetry.io/otel/trace v1.2.0
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
google.golang.org/api v0.15.1 // indirect
gotest.tools v2.2.0+incompatible

go.sum (16 changed lines)
View File

@@ -214,8 +214,9 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -399,8 +400,9 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
@@ -424,6 +426,12 @@ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/otel v1.2.0 h1:YOQDvxO1FayUcT9MIhJhgMyNO1WqoduiyvQHzGN0kUQ=
go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I=
go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo=
go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U=
go.opentelemetry.io/otel/trace v1.2.0 h1:Ys3iqbqZhcf28hHzrm5WAquMkDHNZTUkw7KHbuNjej0=
go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
@@ -546,8 +554,9 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -669,6 +678,7 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=

View File

@@ -123,7 +123,7 @@ func TestGetConfigMap(t *testing.T) {
}
value := configMap.Data["key-0"]
if value != "val-0" {
t.Fatal("got unexpected value", string(value))
t.Fatal("got unexpected value", value)
}
// Try to get a configmap that does not exist, and make sure we've got a "not found" error as a response.

View File

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

View File

@@ -33,7 +33,9 @@ func handleError(f handlerFunc) http.HandlerFunc {
code := httpStatusCode(err)
w.WriteHeader(code)
io.WriteString(w, err.Error()) //nolint:errcheck
if _, err := io.WriteString(w, err.Error()); err != nil {
log.G(req.Context()).WithError(err).Error("error writing error response")
}
logger := log.G(req.Context()).WithError(err).WithField("httpStatusCode", code)
if code >= 500 {

View File

@@ -24,14 +24,15 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
)
type PodListerFunc func(context.Context) ([]*v1.Pod, error) // nolint:golint
type PodListerFunc func(context.Context) ([]*v1.Pod, error) //nolint:golint
func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc { // nolint:golint
func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc { //nolint:golint
if getPods == nil {
return NotImplemented
}
scheme := runtime.NewScheme()
/* #nosec */
v1.SchemeBuilder.AddToScheme(scheme) //nolint:errcheck
codecs := serializer.NewCodecFactory(scheme)

View File

@@ -33,7 +33,7 @@ type ServeMux interface {
Handle(path string, h http.Handler)
}
type PodHandlerConfig struct { // nolint:golint
type PodHandlerConfig struct { //nolint:golint
RunInContainer ContainerExecHandlerFunc
GetContainerLogs ContainerLogsHandlerFunc
// GetPods is meant to enumerate the pods that the provider knows about
@@ -79,7 +79,7 @@ func PodHandler(p PodHandlerConfig, debug bool) http.Handler {
// PodStatsSummaryHandler creates an http handler for serving pod metrics.
//
// If the passed in handler func is nil this will create handlers which only
// serves http.StatusNotImplemented
// serves http.StatusNotImplemented
func PodStatsSummaryHandler(f PodStatsSummaryHandlerFunc) http.Handler {
if f == nil {
return http.HandlerFunc(NotImplemented)

View File

@@ -333,7 +333,7 @@ func (e *nodeNotReadyError) Is(target error) bool {
return ok
}
func (e *nodeNotReadyError) As(target error) bool {
func (e *nodeNotReadyError) As(target interface{}) bool {
val, ok := target.(*nodeNotReadyError)
if ok {
*val = *e
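
The signature change matters because errors.As only consults a method with the exact form As(interface{}) bool; with a target of type error the method compiles but is never called. A self-contained sketch (types hypothetical, not the repository's) showing the method being picked up:

    package main

    import (
        "errors"
        "fmt"
    )

    // timeoutError is a hypothetical concrete error used as the As target.
    type timeoutError struct{ after string }

    func (e timeoutError) Error() string { return "timed out after " + e.after }

    // notReadyError mirrors the change above: As must take interface{}
    // (not error), otherwise errors.As silently ignores it.
    type notReadyError struct{ cause error }

    func (e *notReadyError) Error() string { return "node not ready: " + e.cause.Error() }

    func (e *notReadyError) As(target interface{}) bool {
        p, ok := target.(*timeoutError)
        if !ok {
            return false
        }
        t, ok := e.cause.(timeoutError)
        if !ok {
            return false
        }
        *p = t
        return true
    }

    func main() {
        var err error = &notReadyError{cause: timeoutError{after: "10s"}}
        var t timeoutError
        if errors.As(err, &t) { // matched via (*notReadyError).As, since the types differ
            fmt.Println("unwrapped:", t.after)
        }
    }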

View File

@@ -38,7 +38,7 @@ var (
const (
// There might be a constant we can already leverage here
testNamespace = "default"
informerResyncPeriod = time.Duration(1 * time.Second)
informerResyncPeriod = 1 * time.Second
testNodeName = "testnode"
podSyncWorkers = 3
)
@@ -232,7 +232,7 @@ type system struct {
}
func (s *system) start(ctx context.Context) error {
go s.pc.Run(ctx, podSyncWorkers) // nolint:errcheck
go s.pc.Run(ctx, podSyncWorkers) //nolint:errcheck
select {
case <-s.pc.Ready():
case <-s.pc.Done():

View File

@@ -54,7 +54,7 @@ var (
//
// Note: Implementers can choose to manage a node themselves, in which case
// it is not needed to provide an implementation for this interface.
type NodeProvider interface { // nolint:golint
type NodeProvider interface { //nolint:revive
// Ping checks if the node is still active.
// This is intended to be lightweight as it will be called periodically as a
// heartbeat to keep the node marked as ready in Kubernetes.
@@ -105,7 +105,7 @@ func NewNodeController(p NodeProvider, node *corev1.Node, nodes v1.NodeInterface
}
// NodeControllerOpt are the functional options used for configuring a node
type NodeControllerOpt func(*NodeController) error // nolint:golint
type NodeControllerOpt func(*NodeController) error //nolint:revive
// WithNodeEnableLeaseV1 enables support for v1 leases.
// V1 Leases share all the same properties as v1beta1 leases, except they do not fallback like
@@ -208,7 +208,7 @@ type ErrorHandler func(context.Context, error) error
// NodeController deals with creating and managing a node object in Kubernetes.
// It can register a node with Kubernetes and periodically update its status.
// NodeController manages a single node entity.
type NodeController struct { // nolint:golint
type NodeController struct { //nolint:revive
p NodeProvider
// serverNode must be updated each time it is updated in API Server
@@ -685,10 +685,9 @@ func (t taintsStringer) String() string {
func addNodeAttributes(ctx context.Context, span trace.Span, n *corev1.Node) context.Context {
return span.WithFields(ctx, log.Fields{
"node.UID": string(n.UID),
"node.name": n.Name,
"node.cluster": n.ClusterName,
"node.taints": taintsStringer(n.Spec.Taints),
"node.UID": string(n.UID),
"node.name": n.Name,
"node.taints": taintsStringer(n.Spec.Taints),
})
}

View File

@@ -71,7 +71,7 @@ func testNodeRun(t *testing.T, enableLease bool) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
@@ -189,7 +189,7 @@ func TestNodeCustomUpdateStatusErrorHandler(t *testing.T) {
)
assert.NilError(t, err)
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
timer := time.NewTimer(10 * time.Second)
defer timer.Stop()
@@ -295,7 +295,7 @@ func TestPingAfterStatusUpdate(t *testing.T) {
node, err := NewNodeController(testP, testNode, nodes, opts...)
assert.NilError(t, err)
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
defer func() {
cancel()
<-node.Done()
@@ -363,7 +363,7 @@ func TestBeforeAnnotationsPreserved(t *testing.T) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
@@ -427,7 +427,7 @@ func TestManualConditionsPreserved(t *testing.T) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()

View File

@@ -33,7 +33,7 @@ type authWrapper struct {
// InstrumentAuth wraps the provided Auth in a new instrumented Auth
//
// Note: You would only need this if you rolled your own auth.
// The Auth implementations defined in this package are already instrumented.
// The Auth implementations defined in this package are already instrumented.
func InstrumentAuth(auth Auth) Auth {
if _, ok := auth.(*authWrapper); ok {
// This is already instrumented

View File

@@ -78,12 +78,14 @@ func (n *Node) runHTTP(ctx context.Context) (func(), error) {
log.G(ctx).Debug("Started TLS listener")
srv := &http.Server{Handler: n.h, TLSConfig: n.tlsConfig}
srv := &http.Server{Handler: n.h, TLSConfig: n.tlsConfig, ReadHeaderTimeout: 30 * time.Second}
go srv.Serve(l) //nolint:errcheck
log.G(ctx).Debug("HTTP server running")
return func() {
/* #nosec */
srv.Close()
/* #nosec */
l.Close()
}, nil
}
@@ -275,7 +277,6 @@ func WithClient(c kubernetes.Interface) NodeOpt {
// Some basic values are set for node status, you'll almost certainly want to modify it.
//
// If client is nil, this will construct a client using ClientsetFromEnv
//
// It is up to the caller to configure auth on the HTTP handler.
func NewNode(name string, newProvider NewProviderFunc, opts ...NodeOpt) (*Node, error) {
cfg := NodeConfig{

View File

@@ -403,7 +403,7 @@ func (ts *EndToEndTestSuite) TestCreatePodWithMandatoryInexistentConfigMap(t *te
// It returns an error if the specified pod is not found.
func findPodInPodStats(summary *stats.Summary, pod *v1.Pod) (int, error) {
for i, p := range summary.Pods {
if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name && string(p.PodRef.UID) == string(pod.UID) {
if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name && p.PodRef.UID == string(pod.UID) {
return i, nil
}
}

View File

@@ -0,0 +1,266 @@
// Copyright © 2022 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package opentelemetry implements a github.com/virtual-kubelet/virtual-kubelet/trace.Tracer
// using openTelemetry as a backend.
//
// Use this by setting `trace.T = Adapter{}`
//
// To customize the trace provider used by Adapter, set it via
// `otel.SetTracerProvider(*sdktrace.TracerProvider)`. Typical customizations include setting the
// service name and using your own exporter (e.g. jaeger, otlp, prometheus, zipkin, or stdout).
// Do not forget to call TracerProvider.Shutdown() on any TracerProvider you create, to avoid leaks.
package opentelemetry
import (
"context"
"fmt"
"sync"
"time"
"go.opentelemetry.io/otel/codes"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/trace"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
ot "go.opentelemetry.io/otel/trace"
)
type logLevel string
const (
lDebug logLevel = "DEBUG"
lInfo logLevel = "INFO"
lWarn logLevel = "WARN"
lErr logLevel = "ERROR"
lFatal logLevel = "FATAL"
)
// Adapter implements the trace.Tracer interface for openTelemetry
type Adapter struct{}
// StartSpan creates a new span from openTelemetry using the given name.
func (Adapter) StartSpan(ctx context.Context, name string) (context.Context, trace.Span) {
ctx, ots := otel.Tracer(name).Start(ctx, name)
l := log.G(ctx).WithField("method", name)
s := &span{s: ots, l: l}
ctx = log.WithLogger(ctx, s.Logger())
return ctx, s
}
type span struct {
mu sync.Mutex
s ot.Span
l log.Logger
}
func (s *span) End() {
s.s.End()
}
func (s *span) SetStatus(err error) {
if !s.s.IsRecording() {
return
}
if err == nil {
s.s.SetStatus(codes.Ok, "")
} else {
s.s.SetStatus(codes.Error, err.Error())
}
}
func (s *span) WithField(ctx context.Context, key string, val interface{}) context.Context {
s.mu.Lock()
s.l = s.l.WithField(key, val)
ctx = log.WithLogger(ctx, &logger{s: s.s, l: s.l})
s.mu.Unlock()
if s.s.IsRecording() {
s.s.SetAttributes(makeAttribute(key, val))
}
return ctx
}
func (s *span) WithFields(ctx context.Context, f log.Fields) context.Context {
s.mu.Lock()
s.l = s.l.WithFields(f)
ctx = log.WithLogger(ctx, &logger{s: s.s, l: s.l})
s.mu.Unlock()
if s.s.IsRecording() {
attrs := make([]attribute.KeyValue, 0, len(f))
for k, v := range f {
attrs = append(attrs, makeAttribute(k, v))
}
s.s.SetAttributes(attrs...)
}
return ctx
}
func (s *span) Logger() log.Logger {
return &logger{s: s.s, l: s.l}
}
type logger struct {
s ot.Span
l log.Logger
a []attribute.KeyValue
}
func (l *logger) Debug(args ...interface{}) {
l.logEvent(lDebug, args...)
}
func (l *logger) Debugf(f string, args ...interface{}) {
l.logEventf(lDebug, f, args...)
}
func (l *logger) Info(args ...interface{}) {
l.logEvent(lInfo, args...)
}
func (l *logger) Infof(f string, args ...interface{}) {
l.logEventf(lInfo, f, args...)
}
func (l *logger) Warn(args ...interface{}) {
l.logEvent(lWarn, args...)
}
func (l *logger) Warnf(f string, args ...interface{}) {
l.logEventf(lWarn, f, args...)
}
func (l *logger) Error(args ...interface{}) {
l.logEvent(lErr, args...)
}
func (l *logger) Errorf(f string, args ...interface{}) {
l.logEventf(lErr, f, args...)
}
func (l *logger) Fatal(args ...interface{}) {
l.logEvent(lFatal, args...)
}
func (l *logger) Fatalf(f string, args ...interface{}) {
l.logEventf(lFatal, f, args...)
}
func (l *logger) logEvent(ll logLevel, args ...interface{}) {
msg := fmt.Sprint(args...)
switch ll {
case lDebug:
l.l.Debug(msg)
case lInfo:
l.l.Info(msg)
case lWarn:
l.l.Warn(msg)
case lErr:
l.l.Error(msg)
case lFatal:
l.l.Fatal(msg)
}
if !l.s.IsRecording() {
return
}
l.s.AddEvent(msg, ot.WithTimestamp(time.Now()))
}
func (l *logger) logEventf(ll logLevel, f string, args ...interface{}) {
switch ll {
case lDebug:
l.l.Debugf(f, args...)
case lInfo:
l.l.Infof(f, args...)
case lWarn:
l.l.Warnf(f, args...)
case lErr:
l.l.Errorf(f, args...)
case lFatal:
l.l.Fatalf(f, args...)
}
if !l.s.IsRecording() {
return
}
msg := fmt.Sprintf(f, args...)
l.s.AddEvent(msg, ot.WithTimestamp(time.Now()))
}
func (l *logger) WithError(err error) log.Logger {
return l.WithField("err", err)
}
func (l *logger) WithField(k string, value interface{}) log.Logger {
var attrs []attribute.KeyValue
if l.s.IsRecording() {
attrs = make([]attribute.KeyValue, len(l.a)+1)
copy(attrs, l.a)
attrs[len(attrs)-1] = makeAttribute(k, value)
}
return &logger{s: l.s, a: attrs, l: l.l.WithField(k, value)}
}
func (l *logger) WithFields(fields log.Fields) log.Logger {
var attrs []attribute.KeyValue
if l.s.IsRecording() {
attrs = make([]attribute.KeyValue, len(l.a), len(l.a)+len(fields))
copy(attrs, l.a)
for k, v := range fields {
attrs = append(attrs, makeAttribute(k, v))
}
}
return &logger{s: l.s, a: attrs, l: l.l.WithFields(fields)}
}
func makeAttribute(key string, val interface{}) (attr attribute.KeyValue) {
switch v := val.(type) {
case string:
return attribute.String(key, v)
case []string:
return attribute.StringSlice(key, v)
case fmt.Stringer:
return attribute.Stringer(key, v)
case int:
return attribute.Int(key, v)
case []int:
return attribute.IntSlice(key, v)
case int64:
return attribute.Int64(key, v)
case float64:
return attribute.Float64(key, v)
case []float64:
return attribute.Float64Slice(key, v)
case []int64:
return attribute.Int64Slice(key, v)
case bool:
return attribute.Bool(key, v)
case []bool:
return attribute.BoolSlice(key, v)
case error:
if v == nil {
attribute.String(key, "")
}
return attribute.String(key, v.Error())
default:
return attribute.String(key, fmt.Sprintf("%+v", val))
}
}
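
Tying the package doc together, a minimal wiring sketch; trace.T and Adapter come from the code above, while the provider options, span name, and the import path of this package are assumptions for illustration:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"

        vktrace "github.com/virtual-kubelet/virtual-kubelet/trace"
        "github.com/virtual-kubelet/virtual-kubelet/trace/opentelemetry" // assumed import path for this package
    )

    func main() {
        ctx := context.Background()

        // Configure the provider (exporter, resource, sampler) however you like;
        // the Adapter resolves it through the global otel tracer provider.
        tp := sdktrace.NewTracerProvider()
        otel.SetTracerProvider(tp)
        defer func() { _ = tp.Shutdown(ctx) }() // per the package doc: always Shutdown the provider you create

        // Route virtual-kubelet tracing through OpenTelemetry.
        vktrace.T = opentelemetry.Adapter{}

        ctx, span := vktrace.StartSpan(ctx, "example")
        defer span.End()
        // ctx now carries a span-aware logger via log.G(ctx).
        _ = ctx
    }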

View File

@@ -0,0 +1,640 @@
// Copyright © 2022 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opentelemetry
import (
"context"
"strconv"
"sync"
"testing"
"time"
"github.com/pkg/errors"
"github.com/virtual-kubelet/virtual-kubelet/log"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
"gotest.tools/assert"
"gotest.tools/assert/cmp"
)
func TestStartSpan(t *testing.T) {
t.Run("addField", func(t *testing.T) {
tearDown, p, _ := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
a := Adapter{}
_, s := a.StartSpan(ctx, "name")
s.End()
})
}
func TestSetStatus(t *testing.T) {
testCases := []struct {
description string
spanName string
inputStatus error
expectedCode codes.Code
expectedDescription string
}{
{
description: "error status",
spanName: "test",
inputStatus: errors.New("fake msg"),
expectedCode: codes.Error,
expectedDescription: "fake msg",
}, {
description: "non-error status",
spanName: "test",
inputStatus: nil,
expectedCode: codes.Ok,
expectedDescription: "",
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
l := log.G(ctx).WithField("method", tt.spanName)
s := &span{s: ots, l: l}
s.SetStatus(tt.inputStatus)
assert.Assert(t, s.s.IsRecording())
s.End()
assert.Assert(t, !s.s.IsRecording())
assert.Assert(t, e.status.Code == tt.expectedCode)
assert.Assert(t, e.status.Description == tt.expectedDescription)
s.SetStatus(tt.inputStatus) // should not be panic even if span is ended.
})
}
}
func TestWithField(t *testing.T) {
type field struct {
key string
value interface{}
}
testCases := []struct {
description string
spanName string
fields []field
expectedAttributes []attribute.KeyValue
}{
{
description: "single field",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "multiple unique fields",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}, {key: "testKey2", value: "value2"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
}, {
description: "duplicated fields",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}, {key: "testKey1", value: "value2"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value2")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
l := log.G(ctx).WithField("method", tt.spanName)
s := &span{s: ots, l: l}
for _, f := range tt.fields {
ctx = s.WithField(ctx, f.key, f.value)
}
s.End()
assert.Assert(t, len(e.attributes) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
cmp.Contains(e.attributes, a)
}
})
}
}
func TestWithFields(t *testing.T) {
testCases := []struct {
description string
spanName string
fields log.Fields
expectedAttributes []attribute.KeyValue
}{
{
description: "single field",
spanName: "test",
fields: log.Fields{"testKey1": "value1"},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "multiple unique fields",
spanName: "test",
fields: log.Fields{"testKey1": "value1", "testKey2": "value2"},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
l := log.G(ctx).WithField("method", tt.spanName)
s := &span{s: ots, l: l}
_ = s.WithFields(ctx, tt.fields)
s.End()
assert.Assert(t, len(e.attributes) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
cmp.Contains(e.attributes, a)
}
})
}
}
func TestLog(t *testing.T) {
testCases := []struct {
description string
spanName string
logLevel logLevel
fields log.Fields
msg string
expectedEvents []sdktrace.Event
expectedAttributes []attribute.KeyValue
}{
{
description: "debug",
spanName: "test",
logLevel: lDebug,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []sdktrace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "info",
spanName: "test",
logLevel: lInfo,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []sdktrace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "warn",
spanName: "test",
logLevel: lWarn,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []sdktrace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "error",
spanName: "test",
logLevel: lErr,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []sdktrace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "fatal",
spanName: "test",
logLevel: lFatal,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []sdktrace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
switch tt.logLevel {
case lDebug:
l.WithFields(tt.fields).Debug(tt.msg)
case lInfo:
l.WithFields(tt.fields).Info(tt.msg)
case lWarn:
l.WithFields(tt.fields).Warn(tt.msg)
case lErr:
l.WithFields(tt.fields).Error(tt.msg)
case lFatal:
l.WithFields(tt.fields).Fatal(tt.msg)
}
s.End()
assert.Assert(t, len(e.events) == len(tt.expectedEvents))
for i, event := range tt.expectedEvents {
assert.Assert(t, e.events[i].Name == event.Name)
assert.Assert(t, !e.events[i].Time.IsZero())
}
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
cmp.Contains(fl.a, a)
}
})
}
}
func TestLogf(t *testing.T) {
testCases := []struct {
description string
spanName string
logLevel logLevel
msg string
fields log.Fields
args []interface{}
expectedEvents []sdktrace.Event
expectedAttributes []attribute.KeyValue
}{
{
description: "debug",
spanName: "test",
logLevel: lDebug,
msg: "k1: %s, k2: %v, k3: %d, k4: %v",
fields: map[string]interface{}{"k1": "test", "k2": []string{"test"}, "k3": 1, "k4": []int{1}},
args: []interface{}{"test", []string{"test"}, int(1), []int{1}},
expectedEvents: []sdktrace.Event{{Name: "k1: test, k2: [test], k3: 1, k4: [1]"}},
expectedAttributes: []attribute.KeyValue{
attribute.String("k1", "test"),
attribute.StringSlice("k2", []string{"test"}),
attribute.Int("k3", 1),
attribute.IntSlice("k4", []int{1}),
},
}, {
description: "info",
spanName: "test",
logLevel: lInfo,
msg: "k1: %d, k2: %v, k3: %f, k4: %v",
fields: map[string]interface{}{"k1": int64(3), "k2": []int64{4}, "k3": float64(2), "k4": []float64{4}},
args: []interface{}{int64(3), []int64{4}, float64(2), []float64{4}},
expectedEvents: []sdktrace.Event{{Name: "k1: 3, k2: [4], k3: 2.000000, k4: [4]"}},
expectedAttributes: []attribute.KeyValue{
attribute.Int64("k1", 1),
attribute.Int64Slice("k2", []int64{2}),
attribute.Float64("k3", 3),
attribute.Float64Slice("k2", []float64{4}),
},
}, {
description: "warn",
spanName: "test",
logLevel: lWarn,
msg: "k1: %v, k2: %v",
fields: map[string]interface{}{"k1": map[int]int{1: 1}, "k2": num(1)},
args: []interface{}{map[int]int{1: 1}, num(1)},
expectedEvents: []sdktrace.Event{{Name: "k1: map[1:1], k2: 1"}},
expectedAttributes: []attribute.KeyValue{
attribute.String("k1", "{1:1}"),
attribute.Stringer("k2", num(1)),
},
}, {
description: "error",
spanName: "test",
logLevel: lErr,
msg: "k1: %t, k2: %v, k3: %s",
fields: map[string]interface{}{"k1": true, "k2": []bool{true}, "k3": errors.New("fake")},
args: []interface{}{true, []bool{true}, errors.New("fake")},
expectedEvents: []sdktrace.Event{{Name: "k1: true, k2: [true], k3: fake"}},
expectedAttributes: []attribute.KeyValue{
attribute.Bool("k1", true),
attribute.BoolSlice("k2", []bool{true}),
attribute.String("k3", "fake"),
},
}, {
description: "fatal",
spanName: "test",
logLevel: lFatal,
expectedEvents: []sdktrace.Event{{Name: ""}},
expectedAttributes: []attribute.KeyValue{},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
switch tt.logLevel {
case lDebug:
l.WithFields(tt.fields).Debugf(tt.msg, tt.args...)
case lInfo:
l.WithFields(tt.fields).Infof(tt.msg, tt.args...)
case lWarn:
l.WithFields(tt.fields).Warnf(tt.msg, tt.args...)
case lErr:
l.WithFields(tt.fields).Errorf(tt.msg, tt.args...)
case lFatal:
l.WithFields(tt.fields).Fatalf(tt.msg, tt.args...)
}
s.End()
assert.Assert(t, len(e.events) == len(tt.expectedEvents))
for i, event := range tt.expectedEvents {
assert.Assert(t, e.events[i].Name == event.Name, e.events[i].Name)
assert.Assert(t, !e.events[i].Time.IsZero())
}
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
cmp.Contains(fl.a, a)
}
l.Debugf(tt.msg, tt.args) // should not panic even if span is finished
})
}
}
func TestLogWithField(t *testing.T) {
type field struct {
key string
value interface{}
}
testCases := []struct {
description string
spanName string
fields []field
expectedAttributes []attribute.KeyValue
}{
{
description: "single field",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "multiple unique fields",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}, {key: "testKey2", value: "value2"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
}, {
description: "duplicated fields",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}, {key: "testKey1", value: "value2"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, _ := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
for _, f := range tt.fields {
l.WithField(f.key, f.value).Info("")
}
s.End()
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
cmp.Contains(fl.a, a)
}
l.Debug("") // should not panic even if span is finished
})
}
}
func TestLogWithError(t *testing.T) {
testCases := []struct {
description string
spanName string
err error
expectedAttributes []attribute.KeyValue
}{
{
description: "normal",
spanName: "test",
err: errors.New("fake"),
expectedAttributes: []attribute.KeyValue{{Key: "err", Value: attribute.StringValue("fake")}},
}, {
description: "nil error",
spanName: "test",
err: nil,
expectedAttributes: []attribute.KeyValue{{Key: "err", Value: attribute.StringValue("")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, _ := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
l.WithError(tt.err).Error("")
s.End()
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
cmp.Contains(fl.a, a)
}
})
}
}
func TestLogWithFields(t *testing.T) {
testCases := []struct {
description string
spanName string
fields log.Fields
expectedAttributes []attribute.KeyValue
}{
{
description: "single field",
spanName: "test",
fields: log.Fields{"testKey1": "value1"},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "multiple unique fields",
spanName: "test",
fields: log.Fields{"testKey1": "value1", "testKey2": "value2"},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, _ := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
l.WithFields(tt.fields).Debug("")
s.End()
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
cmp.Contains(fl.a, a)
}
})
}
}
func setupSuite() (func(provider *sdktrace.TracerProvider), *sdktrace.TracerProvider, *fakeExporter) {
r := NewResource("virtual-kubelet", "1.2.3")
e := &fakeExporter{}
p := sdktrace.NewTracerProvider(
sdktrace.WithSyncer(e),
sdktrace.WithResource(r),
sdktrace.WithSampler(sdktrace.AlwaysSample()),
)
otel.SetTracerProvider(p)
// Return a function to teardown the test
return func(provider *sdktrace.TracerProvider) {
_ = p.Shutdown(context.Background())
}, p, e
}
func NewResource(name, version string) *resource.Resource {
return resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String(name),
semconv.ServiceVersionKey.String(version),
)
}
type fakeExporter struct {
sync.Mutex
// attributes describe the aspects of the spans.
attributes []attribute.KeyValue
// Links returns all the links the span has to other spans.
links []sdktrace.Link
// Events returns all the events that occurred within the span's
// lifetime.
events []sdktrace.Event
// Status returns the spans status.
status sdktrace.Status
}
func (f *fakeExporter) ExportSpans(_ context.Context, spans []sdktrace.ReadOnlySpan) (err error) {
f.Lock()
defer f.Unlock()
f.attributes = make([]attribute.KeyValue, 0)
f.links = make([]sdktrace.Link, 0)
f.events = make([]sdktrace.Event, 0)
for _, s := range spans {
f.attributes = append(f.attributes, s.Attributes()...)
f.links = append(f.links, s.Links()...)
f.events = append(f.events, s.Events()...)
f.status = s.Status()
}
return
}
func (f *fakeExporter) Shutdown(_ context.Context) (err error) {
f.attributes = make([]attribute.KeyValue, 0)
f.links = make([]sdktrace.Link, 0)
f.events = make([]sdktrace.Event, 0)
return
}
type fakeLogger struct {
a []attribute.KeyValue
}
func (*fakeLogger) Debug(...interface{}) {}
func (*fakeLogger) Debugf(string, ...interface{}) {}
func (*fakeLogger) Info(...interface{}) {}
func (*fakeLogger) Infof(string, ...interface{}) {}
func (*fakeLogger) Warn(...interface{}) {}
func (*fakeLogger) Warnf(string, ...interface{}) {}
func (*fakeLogger) Error(...interface{}) {}
func (*fakeLogger) Errorf(string, ...interface{}) {}
func (*fakeLogger) Fatal(...interface{}) {}
func (*fakeLogger) Fatalf(string, ...interface{}) {}
func (l *fakeLogger) WithField(k string, v interface{}) log.Logger {
l.a = append(l.a, makeAttribute(k, v))
return l
}
func (l *fakeLogger) WithFields(fs log.Fields) log.Logger {
for k, v := range fs {
l.a = append(l.a, makeAttribute(k, v))
}
return l
}
func (l *fakeLogger) WithError(err error) log.Logger {
l.a = append(l.a, makeAttribute("err", err))
return l
}
type num int
func (i num) String() string {
return strconv.Itoa(int(i))
}