Compare commits

..

61 Commits

Author SHA1 Message Date
Pires
63f85cc062 wuo 2023-03-13 10:46:29 +00:00
Brian Goff
7bcacb1cab Merge pull request #1060 from edigaryev/patch-1
README.md: add go.dev badge
2022-12-15 15:15:03 -08:00
Nikolay Edigaryev
a610358f56 README.md: add go.dev badge 2022-12-09 22:34:17 +04:00
dependabot[bot]
704b01eac6 Bump sigs.k8s.io/controller-runtime from 0.7.1 to 0.13.0 (#1034)
Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.7.1 to 0.13.0.
- [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases)
- [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/master/RELEASE.md)
- [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.7.1...v0.13.0)

---
updated-dependencies:
- dependency-name: sigs.k8s.io/controller-runtime
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-14 13:56:42 -07:00
Heba Elayoty
94999fc0b6 Merge pull request #1035 from virtual-kubelet/dependabot/go_modules/k8s.io/klog/v2-2.80.1
Bump k8s.io/klog/v2 from 2.2.0 to 2.80.1
2022-10-14 13:32:39 -07:00
dependabot[bot]
9d8005e4b8 Bump k8s.io/klog/v2 from 2.2.0 to 2.80.1
Bumps [k8s.io/klog/v2](https://github.com/kubernetes/klog) from 2.2.0 to 2.80.1.
- [Release notes](https://github.com/kubernetes/klog/releases)
- [Changelog](https://github.com/kubernetes/klog/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes/klog/compare/v2.2.0...v2.80.1)

---
updated-dependencies:
- dependency-name: k8s.io/klog/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-10-10 17:37:07 +00:00
Brian Goff
a486eaffd2 Merge pull request #1046 from cpuguy83/codeql_perms
codeql: Add explicit permissions
2022-10-10 10:36:08 -07:00
Brian Goff
d66366ba96 codeql: Add explicit permissions
Codeql requires write access to security-events, but our default action
token (rightly) only has read permissions.
This adds the explicit request for write access.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-10 17:29:32 +00:00
Brian Goff
bb4e20435d Merge pull request #1042 from cpuguy83/bumps
Bump packages
2022-10-08 10:51:33 -07:00
Brian Goff
6feafcf018 Remove klogv2 alias
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
5db1443e33 Fix apparent bad copy/pasta in test causing panic
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
2c4442b17f Fix linting issues and update make lint target.
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
70848cfdae Bump k8s deps to v0.24
This requires dropping otel down to v0.20 because the apiserver package
is importing it and some packages moved around with otel v1.
Even k8s v0.25 still uses this old version of otel, so we are stuck for
a bit (v0.26 will, as of now, use a newer otel version).

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
c668ae6ab6 Bump problematic deps
Changes in klog and logr have made automatic bumps from dependabot
problematic.
We also shouldn't need klogv1 so removed that.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
f7f8b45117 Bump go version to 1.18 in dockerfile
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
5001135763 Merge pull request #1043 from LuBingtan/add-default-client
Add default client in mock provider
2022-09-30 09:08:50 -07:00
lubingtan
67be3c681d Add default client
Signed-off-by: lubingtan <bingtlu@ebay.com>
2022-09-30 09:47:22 +08:00
Brian Goff
fca742986c Merge pull request #1036 from virtual-kubelet/dependabot/github_actions/actions/checkout-3
Bump actions/checkout from 2 to 3
2022-09-12 11:07:19 -07:00
dependabot[bot]
db7f53c1ca Bump actions/checkout from 2 to 3
Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-09-12 17:33:34 +00:00
Brian Goff
c63b8f0dec Merge pull request #994 from pigletfly/bump-go
bump golang to 1.17
2022-09-12 09:38:09 -07:00
pigletfly
83fbc0c687 bump golang to 1.17
Signed-off-by: pigletfly <wangbing.adam@gmail.com>
2022-09-07 09:34:00 +08:00
Brian Goff
d2523fe808 Merge pull request #1031 from cpuguy83/fix_envtest_name
Fix typo in job name
2022-08-31 14:03:00 -07:00
Brian Goff
7ee822ec6d Fix typo in job name 2022-08-31 21:02:12 +00:00
Brian Goff
0b70fb1958 Merge pull request #1025 from virtual-kubelet/dependabot/go_modules/contrib.go.opencensus.io/exporter/jaeger-0.2.1
Bump contrib.go.opencensus.io/exporter/jaeger from 0.1.0 to 0.2.1
2022-08-31 13:15:00 -07:00
dependabot[bot]
a25c1def45 Bump contrib.go.opencensus.io/exporter/jaeger from 0.1.0 to 0.2.1
Bumps [contrib.go.opencensus.io/exporter/jaeger](https://github.com/census-ecosystem/opencensus-go-exporter-jaeger) from 0.1.0 to 0.2.1.
- [Release notes](https://github.com/census-ecosystem/opencensus-go-exporter-jaeger/releases)
- [Commits](https://github.com/census-ecosystem/opencensus-go-exporter-jaeger/compare/v0.1.0...v0.2.1)

---
updated-dependencies:
- dependency-name: contrib.go.opencensus.io/exporter/jaeger
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:58:59 +00:00
Brian Goff
d682bb3894 Merge pull request #1028 from cpuguy83/codeql_nopr
Only run codeql on pushes to master, not pr's
2022-08-31 12:57:42 -07:00
Brian Goff
6198b02423 Only run codeql on pushes to master, not pr's
These are extremely slow and probably very expensive for someone.
We don't need these running on PR's which have constant pushes, rebases,
etc.

The activity on the repo is slow enough we can fix-up things after
codeql runs on master.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 19:56:04 +00:00
Brian Goff
9d94eea9e9 Merge pull request #1023 from virtual-kubelet/dependabot/github_actions/github/codeql-action-2
Bump github/codeql-action from 1 to 2
2022-08-31 12:48:19 -07:00
dependabot[bot]
de4fe42586 Bump github/codeql-action from 1 to 2
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 1 to 2.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/v1...v2)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:42:26 +00:00
Brian Goff
305e33bfbf Merge pull request #1022 from virtual-kubelet/dependabot/github_actions/actions/setup-go-3
Bump actions/setup-go from 2 to 3
2022-08-31 12:41:50 -07:00
dependabot[bot]
00d8340a64 Bump actions/setup-go from 2 to 3
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 2 to 3.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:36:55 +00:00
Brian Goff
5e5a842dbb Merge pull request #1021 from cpuguy83/dependabot_actions
dependabot: update github actions
2022-08-31 12:36:37 -07:00
Brian Goff
aa94284712 dependabot: update github actions
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 19:35:43 +00:00
Brian Goff
d87dd1c79f Merge pull request #1018 from virtual-kubelet/dependabot/go_modules/contrib.go.opencensus.io/exporter/ocagent-0.7.0
Bump contrib.go.opencensus.io/exporter/ocagent from 0.4.12 to 0.7.0
2022-08-31 12:31:16 -07:00
dependabot[bot]
ab3615b8d7 Bump contrib.go.opencensus.io/exporter/ocagent from 0.4.12 to 0.7.0
Bumps [contrib.go.opencensus.io/exporter/ocagent](https://github.com/census-ecosystem/opencensus-go-exporter-ocagent) from 0.4.12 to 0.7.0.
- [Release notes](https://github.com/census-ecosystem/opencensus-go-exporter-ocagent/releases)
- [Commits](https://github.com/census-ecosystem/opencensus-go-exporter-ocagent/compare/v0.4.12...v0.7.0)

---
updated-dependencies:
- dependency-name: contrib.go.opencensus.io/exporter/ocagent
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:26:33 +00:00
Brian Goff
22d2416dc4 Merge pull request #1020 from virtual-kubelet/dependabot/go_modules/github.com/prometheus/client_golang-1.13.0
Bump github.com/prometheus/client_golang from 1.7.1 to 1.13.0
2022-08-31 12:25:37 -07:00
dependabot[bot]
e1c6e80a7a Bump github.com/prometheus/client_golang from 1.7.1 to 1.13.0
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.7.1 to 1.13.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.7.1...v1.13.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:20:54 +00:00
Brian Goff
1ed3180ec2 Merge pull request #1019 from virtual-kubelet/dependabot/go_modules/github.com/spf13/cobra-1.5.0
Bump github.com/spf13/cobra from 1.0.0 to 1.5.0
2022-08-31 12:19:55 -07:00
dependabot[bot]
97452b493f Bump github.com/spf13/cobra from 1.0.0 to 1.5.0
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.0.0 to 1.5.0.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.0.0...v1.5.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 18:40:59 +00:00
Brian Goff
6363360781 Merge pull request #1004 from mxplusb/feature/dependabot
added basic dependabot integration.
2022-08-31 11:40:07 -07:00
Brian Goff
44d0df547d Merge branch 'master' into feature/dependabot 2022-08-31 11:30:14 -07:00
Brian Goff
10a7559b83 Merge pull request #1006 from yabuchan/opentelemetry
Add opentelemetry as tracing provider
2022-08-31 11:29:56 -07:00
Brian Goff
b98ba29b52 Merge branch 'master' into opentelemetry 2022-08-31 11:09:42 -07:00
Brian Goff
008fe17b91 Merge pull request #1015 from cpuguy83/gh_actions
Add github actions
2022-08-31 11:00:53 -07:00
Brian Goff
ec1fe2070a Merge pull request #1013 from solarkennedy/k8s_1.24_compat
Removed deprecated node Clustername
2022-08-30 22:37:43 -07:00
Brian Goff
48e29d75fc Remove circleci
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 00:58:56 +00:00
Brian Goff
f617ccebc5 Fixup some new lint issues
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 00:58:56 +00:00
Brian Goff
433e0bbd20 Add github actions
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 00:58:51 +00:00
Kyle Anderson
a8f253088c Removed deprecated node Clustername
With this one line, vk fails to build against k8s 1.24 libs.

The comment says:

    // Deprecated: ClusterName is a legacy field that was always cleared by
    // the system and never used; it will be removed completely in 1.25.

Seems to be removed in 1.24 though.
2022-08-25 19:39:11 -07:00
yabuchan
38e662129d remove warning from golinter 2022-07-08 10:23:17 +09:00
yabuchan
95bdbdec0d reflect review comments 2022-07-08 00:18:58 +09:00
yabuchan
1958686b4a remove unnecessary lines 2022-07-04 21:38:38 +09:00
yabuchan
36397f80c2 downgrade opentelemetry to avoid the library conflicts 2022-07-04 19:15:45 +09:00
yabuchan
801b44543c do not save attributes in logger, add fake logger in unit test 2022-07-04 13:52:51 +09:00
yabuchan
853f9ead1c add sampler in test 2022-07-04 00:22:57 +09:00
yabuchan
915445205f add logic for otel 2022-07-03 23:30:10 +09:00
Sienna Lloyd
2b7e4c9dc6 added basic dependabot integration.
Signed-off-by: Sienna Lloyd <sienna.lloyd@hey.com>
2022-06-23 11:30:16 -06:00
Brian Goff
410e05878a Merge pull request #985 from cbsfly/fix-klog
Change klog flag version to v2
2021-11-23 07:28:04 -08:00
chenbingshen.invoker
70c7745444 Change klog flag version to v2 2021-11-18 18:29:26 +08:00
Brian Goff
269ef14a7a Merge pull request #984 from pigletfly/fix-readme
Add scrape pod metrics
2021-11-16 13:54:57 -08:00
pigletfly
faaf14c68d Add scrape pod metrics 2021-11-12 10:56:46 +08:00
48 changed files with 2569 additions and 1082 deletions

View File

@@ -1,167 +0,0 @@
version: 2.0
jobs:
validate:
resource_class: xlarge
docker:
- image: circleci/golang:1.15
environment:
GO111MODULE: "on"
GOPROXY: https://proxy.golang.org
working_directory: /go/src/github.com/virtual-kubelet/virtual-kubelet
steps:
- checkout
- restore_cache:
keys:
- validate-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
- run:
name: go vet
command: V=1 CI=1 make vet
- run:
name: Lint
command: make lint
- run:
name: Dependencies
command: scripts/validate/gomod.sh
- save_cache:
key: validate-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
test:
resource_class: xlarge
docker:
- image: circleci/golang:1.16
environment:
GO111MODULE: "on"
working_directory: /go/src/github.com/virtual-kubelet/virtual-kubelet
steps:
- checkout
- restore_cache:
keys:
- test-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
- run:
name: Build
command: V=1 make build
- run: go install gotest.tools/gotestsum@latest
- run:
name: Tests
environment:
GOTEST: gotestsum -- -timeout=9m
GOTESTSUM_JUNITFILE: output/unit/results.xml
GODEBUG: cgocheck=2
command: |
mkdir -p output/unit
V=1 make test envtest
- save_cache:
key: test-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
- store_test_results:
path: output
e2e:
machine:
image: ubuntu-1604:202010-01
working_directory: /home/circleci/go/src/github.com/virtual-kubelet/virtual-kubelet
environment:
CHANGE_MINIKUBE_NONE_USER: true
GOPATH: /home/circleci/go
KUBECONFIG: /home/circleci/.kube/config
KUBERNETES_VERSION: v1.20.1
MINIKUBE_HOME: /home/circleci
MINIKUBE_VERSION: v1.16.0
MINIKUBE_WANTUPDATENOTIFICATION: false
MINIKUBE_WANTREPORTERRORPROMPT: false
SKAFFOLD_VERSION: v1.17.2
GO111MODULE: "on"
steps:
- checkout
- run:
name: Install kubectl
command: |
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubectl
chmod +x kubectl
sudo mv kubectl /usr/local/bin/
mkdir -p ${HOME}/.kube
touch ${HOME}/.kube/config
- run:
name: Install Skaffold
command: |
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/${SKAFFOLD_VERSION}/skaffold-linux-amd64
chmod +x skaffold
sudo mv skaffold /usr/local/bin/
- run:
name: Install Minikube dependencies
command: |
sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet # systemd unit is disabled
- run:
name: Install Minikube
command: |
curl -Lo minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
chmod +x minikube
sudo mv minikube /usr/local/bin/
- run:
name: Start Minikube
command: |
sudo -E minikube start --vm-driver=none --cpus 2 --memory 2048 --kubernetes-version=${KUBERNETES_VERSION}
- run:
name: Wait for Minikube
command: |
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}';
until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
sleep 1;
done
- run:
name: Watch pods
command: kubectl get pods -o json --watch
background: true
- run:
name: Watch nodes
command: kubectl get nodes -o json --watch
background: true
- restore_cache:
keys:
- e2e-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}-2
- run:
name: Run the end-to-end test suite
environment:
GOTEST: gotestsum --
command: |
mkdir $HOME/.go
export PATH=$HOME/.go/bin:${PATH}
curl -fsSL -o "/tmp/go.tar.gz" "https://dl.google.com/go/go1.16.4.linux-amd64.tar.gz"
tar -C $HOME/.go --strip-components=1 -xzf "/tmp/go.tar.gz"
go version
mkdir -p output/e2e
export GOTESTSUM_JUNITFILE="$(pwd)/output/e2e/results.xml"
export PATH="${GOPATH}/bin:${PATH}"
go install gotest.tools/gotestsum@latest
make e2e
- store_test_results:
path: output
- save_cache:
key: e2e-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}-2
paths:
- "/home/circleci/go/pkg/mod"
- run:
name: Collect logs on failure from vkubelet-mock-0
command: |
kubectl logs vkubelet-mock-0
when: on_fail
workflows:
version: 2
validate_and_test:
jobs:
- validate
- test
- e2e:
requires:
- validate
- test

View File

@@ -1,4 +1,6 @@
.vscode
private.env
*.private.*
providers/azurebatch/deployment/
providers/azurebatch/deployment/
Dockerfile
.dockerignore

10
.github/dependabot.yml vendored Normal file
View File

@@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

110
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,110 @@
name: CI
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches: [master]
pull_request:
env:
GO_VERSION: "1.18"
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v3
- uses: golangci/golangci-lint-action@v3
with:
version: v1.48.0
args: --timeout=5m
unit-tests:
name: Unit Tests
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v3
- name: Run Tests
run: make test
env-tests:
name: Envtest Tests
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v3
- name: Run Tests
run: make envtest
e2e:
name: E2E
runs-on: ubuntu-22.04
timeout-minutes: 10
env:
CHANGE_MINIKUBE_NONE_USER: true
KUBERNETES_VERSION: v1.20.1
MINIKUBE_HOME: /home/circleci
MINIKUBE_VERSION: v1.16.0
MINIKUBE_WANTUPDATENOTIFICATION: false
MINIKUBE_WANTREPORTERRORPROMPT: false
SKAFFOLD_VERSION: v1.17.2
GO111MODULE: "on"
steps:
- uses: actions/setup-go@v3
with:
go-version: ${{ env.GO_VERSION }}
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Skaffold
run: |
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/${SKAFFOLD_VERSION}/skaffold-linux-amd64
chmod +x skaffold
sudo mv skaffold /usr/local/bin/
echo /usr/local/bin >> $GITHUB_PATH
- name: Install Minikube dependencies
run: |
sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet # systemd unit is disabled
- name: Install Minikube
run: |
curl -Lo minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
chmod +x minikube
sudo mv minikube /usr/local/bin/
- name: Start Minikube
run: |
sudo -E PATH=$PATH minikube start --vm-driver=none --cpus 2 --memory 2048 --kubernetes-version=${KUBERNETES_VERSION}
- name: Wait for Minikube
run: |
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}';
until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
sleep 1;
done
- name: Run Tests
run: make e2e

View File

@@ -1,56 +1,59 @@
name: "CodeQL"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
branches: [master]
schedule:
- cron: '19 18 * * 3'
- cron: "19 18 * * 3"
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
language: ["go"]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

View File

@@ -2,6 +2,8 @@ linter-settings:
lll:
line-length: 200
timeout: 10m
run:
skip-dirs:
# This directory contains copy code from upstream kubernetes/kubernetes, skip it.
@@ -12,20 +14,26 @@ run:
linters:
enable:
- errcheck
- govet
- ineffassign
- golint
- goconst
- goimports
- unused
- structcheck
- varcheck
- deadcode
- staticcheck
- unconvert
- gofmt
- goimports
- ineffassign
- vet
- unused
- misspell
- nolintlint
- gocritic
- gosec
- exportloopref # Checks for pointers to enclosing loop variables
- tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
linters-settings:
gosec:
excludes:
- G304
issues:
exclude-use-default: false
exclude:
# EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). (is not checked|Errors unhandled)

View File

@@ -1,4 +1,6 @@
FROM golang:1.15 as builder
ARG GOLANG_CI_LINT_VERSION
FROM golang:1.18 as builder
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
COPY . /go/src/github.com/virtual-kubelet/virtual-kubelet
@@ -7,6 +9,22 @@ ARG BUILD_TAGS=""
RUN make VK_BUILD_TAGS="${BUILD_TAGS}" build
RUN cp bin/virtual-kubelet /usr/bin/virtual-kubelet
FROM golangci/golangci-lint:${GOLANG_CI_LINT_VERSION} as lint
WORKDIR /app
COPY go.mod ./
COPY go.sum ./
RUN \
--mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go mod download
COPY . .
ARG OUT_FORMAT
RUN \
--mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/golangci-lint \
golangci-lint run -v --out-format="${OUT_FORMAT:-colored-line-number}"
FROM scratch
COPY --from=builder /usr/bin/virtual-kubelet /usr/bin/virtual-kubelet
COPY --from=builder /etc/ssl/certs/ /etc/ssl/certs

View File

@@ -183,6 +183,10 @@ envtest: kubebuilder_2.3.1_${TEST_OS}_${TEST_ARCH}
fmt:
goimports -w $(shell go list -f '{{.Dir}}' ./...)
export GOLANG_CI_LINT_VERSION ?= v1.48.0
DOCKER_BUILD ?= docker buildx build
.PHONY: lint
lint: $(gobin_tool)
gobin -run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.33.0 run ./...
lint:
$(DOCKER_BUILD) --target=lint --build-arg GOLANG_CI_LINT_VERSION --build-arg OUT_FORMAT .

View File

@@ -1,5 +1,7 @@
# Virtual Kubelet
[![Go Reference](https://pkg.go.dev/badge/github.com/virtual-kubelet/virtual-kubelet.svg)](https://pkg.go.dev/github.com/virtual-kubelet/virtual-kubelet)
Virtual Kubelet is an open source [Kubernetes kubelet](https://kubernetes.io/docs/reference/generated/kubelet/)
implementation that masquerades as a kubelet for the purposes of connecting Kubernetes to other APIs.
This allows the nodes to be backed by other services like ACI, AWS Fargate, [IoT Edge](https://github.com/Azure/iot-edge-virtual-kubelet-provider), [Tensile Kube](https://github.com/virtual-kubelet/tensile-kube) etc. The primary scenario for VK is enabling the extension of the Kubernetes API into serverless container platforms like ACI and Fargate, though we are open to others. However, it should be noted that VK is explicitly not intended to be an alternative to Kubernetes federation.
@@ -271,6 +273,11 @@ One of the roles of a Kubelet is to accept requests from the API server for
things like `kubectl logs` and `kubectl exec`. Helpers for setting this up are
provided [here](https://godoc.org/github.com/virtual-kubelet/virtual-kubelet/node/api)
#### Scrape Pod metrics
If you want to use HPA(Horizontal Pod Autoscaler) in your cluster, the provider should implement the `GetStatsSummary` function. Then metrics-server will be able to get the metrics of the pods on virtual-kubelet. Otherwise, you may see `No metrics for pod ` on metrics-server, which means the metrics of the pods on virtual-kubelet are not collected.
## Testing
### Unit tests
@@ -305,4 +312,3 @@ Monthly Virtual Kubelet Office Hours are held at 10am PST on the last Thursday o
Our google drive with design specifications and meeting notes are [here](https://drive.google.com/drive/folders/19Ndu11WBCCBDowo9CrrGUHoIfd2L8Ueg?usp=sharing).
We also have a community slack channel named virtual-kubelet in the Kubernetes slack. You can also connect with the Virtual Kubelet community via the [mailing list](https://lists.cncf.io/g/virtualkubelet-dev).

View File

@@ -22,7 +22,7 @@ import (
"github.com/pkg/errors"
"github.com/spf13/pflag"
"k8s.io/klog"
klog "k8s.io/klog/v2"
)
type mapVar map[string]string
@@ -61,8 +61,10 @@ func installFlags(flags *pflag.FlagSet, c *Opts) {
flags.StringVar(&c.KubeConfigPath, "kubeconfig", c.KubeConfigPath, "kube config file to use for connecting to the Kubernetes API server")
flags.StringVar(&c.KubeNamespace, "namespace", c.KubeNamespace, "kubernetes namespace (default is 'all')")
/* #nosec */
flags.MarkDeprecated("namespace", "Nodes must watch for pods in all namespaces. This option is now ignored.") //nolint:errcheck
flags.MarkHidden("namespace") //nolint:errcheck
/* #nosec */
flags.MarkHidden("namespace") //nolint:errcheck
flags.StringVar(&c.KubeClusterDomain, "cluster-domain", c.KubeClusterDomain, "kubernetes cluster-domain (default is 'cluster.local')")
flags.StringVar(&c.NodeName, "nodename", c.NodeName, "kubernetes node name")
@@ -74,13 +76,16 @@ func installFlags(flags *pflag.FlagSet, c *Opts) {
flags.StringVar(&c.TaintKey, "taint", c.TaintKey, "Set node taint key")
flags.BoolVar(&c.DisableTaint, "disable-taint", c.DisableTaint, "disable the virtual-kubelet node taint")
/* #nosec */
flags.MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable") //nolint:errcheck
flags.IntVar(&c.PodSyncWorkers, "pod-sync-workers", c.PodSyncWorkers, `set the number of pod synchronization workers`)
flags.BoolVar(&c.EnableNodeLease, "enable-node-lease", c.EnableNodeLease, `use node leases (1.13) for node heartbeats`)
/* #nosec */
flags.MarkDeprecated("enable-node-lease", "leases are always enabled") //nolint:errcheck
flags.MarkHidden("enable-node-lease") //nolint:errcheck
/* #nosec */
flags.MarkHidden("enable-node-lease") //nolint:errcheck
flags.StringSliceVar(&c.TraceExporters, "trace-exporter", c.TraceExporters, fmt.Sprintf("sets the tracing exporter to use, available exporters: %s", AvailableTraceExporters()))
flags.StringVar(&c.TraceConfig.ServiceName, "trace-service-name", c.TraceConfig.ServiceName, "sets the name of the service used to register with the trace exporter")

View File

@@ -36,7 +36,7 @@ func getTaint(c Opts) (*corev1.Taint, error) {
key = getEnv("VKUBELET_TAINT_KEY", key)
value = getEnv("VKUBELET_TAINT_VALUE", value)
effectEnv := getEnv("VKUBELET_TAINT_EFFECT", string(c.TaintEffect))
effectEnv := getEnv("VKUBELET_TAINT_EFFECT", c.TaintEffect)
var effect corev1.TaintEffect
switch effectEnv {

View File

@@ -15,6 +15,7 @@
package root
import (
"fmt"
"os"
"path/filepath"
"strconv"
@@ -95,6 +96,8 @@ type Opts struct {
Version string
}
const maxInt32 = 1<<31 - 1
// SetDefaultOpts sets default options for unset values on the passed in option struct.
// Fields that are already set will not be modified.
func SetDefaultOpts(c *Opts) error {
@@ -128,6 +131,10 @@ func SetDefaultOpts(c *Opts) error {
if err != nil {
return errors.Wrap(err, "error parsing KUBELET_PORT environment variable")
}
if p > maxInt32 {
return fmt.Errorf("KUBELET_PORT environment variable is too large")
}
/* #nosec */
c.ListenPort = int32(p)
} else {
c.ListenPort = DefaultListenPort

View File

@@ -21,6 +21,7 @@ import (
"os"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/virtual-kubelet/virtual-kubelet/errdefs"
@@ -105,7 +106,8 @@ func setupZpages(ctx context.Context) {
zpages.Handle(mux, "/debug")
go func() {
// This should never terminate, if it does, it will always terminate with an error
e := http.Serve(listener, mux)
srv := &http.Server{Handler: mux, ReadHeaderTimeout: 30 * time.Second}
e := srv.Serve(listener)
if e == http.ErrServerClosed {
return
}

View File

@@ -19,7 +19,8 @@ import (
"go.opencensus.io/trace"
)
type TracingExporterOptions struct { // nolint: golint
// TracingExporterOptions is the options passed to the tracing exporter init function.
type TracingExporterOptions struct { //nolint: golint
Tags map[string]string
ServiceName string
}

View File

@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !no_jaeger_exporter
// +build !no_jaeger_exporter
package root
import (
"errors"
"fmt"
"os"
"contrib.go.opencensus.io/exporter/jaeger"
@@ -32,7 +32,6 @@ func init() {
// NewJaegerExporter creates a new opencensus tracing exporter.
func NewJaegerExporter(opts TracingExporterOptions) (trace.Exporter, error) {
jOpts := jaeger.Options{
Endpoint: os.Getenv("JAEGER_ENDPOINT"), // deprecated
CollectorEndpoint: os.Getenv("JAEGER_COLLECTOR_ENDPOINT"),
AgentEndpoint: os.Getenv("JAEGER_AGENT_ENDPOINT"),
Username: os.Getenv("JAEGER_USER"),
@@ -42,11 +41,8 @@ func NewJaegerExporter(opts TracingExporterOptions) (trace.Exporter, error) {
},
}
if jOpts.Endpoint != "" && jOpts.CollectorEndpoint == "" { // nolintlint:staticcheck
jOpts.CollectorEndpoint = fmt.Sprintf("%s/api/traces", jOpts.Endpoint) // nolintlint:staticcheck
}
if jOpts.CollectorEndpoint == "" && jOpts.AgentEndpoint == "" { // nolintlint:staticcheck
return nil, errors.New("Must specify either JAEGER_COLLECTOR_ENDPOINT or JAEGER_AGENT_ENDPOINT")
return nil, errors.New("must specify either JAEGER_COLLECTOR_ENDPOINT or JAEGER_AGENT_ENDPOINT")
}
for k, v := range opts.Tags {

View File

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !no_ocagent_exporter
// +build !no_ocagent_exporter
package root

View File

@@ -5,19 +5,19 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"strings"
"time"
"github.com/virtual-kubelet/virtual-kubelet/errdefs"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
"github.com/virtual-kubelet/virtual-kubelet/trace"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
const (
@@ -42,7 +42,7 @@ var (
*/
// MockProvider implements the virtual-kubelet provider interface and stores pods in memory.
type MockProvider struct { // nolint:golint
type MockProvider struct { //nolint:golint
nodeName string
operatingSystem string
internalIP string
@@ -54,7 +54,7 @@ type MockProvider struct { // nolint:golint
}
// MockConfig contains a mock virtual-kubelet's configurable parameters.
type MockConfig struct { // nolint:golint
type MockConfig struct { //nolint:golint
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
Pods string `json:"pods,omitempty"`
@@ -97,7 +97,7 @@ func NewMockProvider(providerConfig, nodeName, operatingSystem string, internalI
// loadConfig loads the given json configuration files.
func loadConfig(providerConfig, nodeName string) (config MockConfig, err error) {
data, err := ioutil.ReadFile(providerConfig)
data, err := os.ReadFile(providerConfig)
if err != nil {
return config, err
}
@@ -283,7 +283,7 @@ func (p *MockProvider) GetContainerLogs(ctx context.Context, namespace, podName,
ctx = addAttributes(ctx, span, namespaceKey, namespace, nameKey, podName, containerNameKey, containerName)
log.G(ctx).Infof("receive GetContainerLogs %q", podName)
return ioutil.NopCloser(strings.NewReader("")), nil
return io.NopCloser(strings.NewReader("")), nil
}
// RunInContainer executes a command in a container in the pod, copying data
@@ -328,8 +328,8 @@ func (p *MockProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
return pods, nil
}
func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { // nolint:golint
ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") // nolint:staticcheck,ineffassign
func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { //nolint:golint
ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") //nolint:staticcheck,ineffassign
defer span.End()
n.Status.Capacity = p.capacity()
@@ -467,10 +467,14 @@ func (p *MockProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, err
for _, container := range pod.Spec.Containers {
// Grab a dummy value to be used as the total CPU usage.
// The value should fit a uint32 in order to avoid overflows later on when computing pod stats.
/* #nosec */
dummyUsageNanoCores := uint64(rand.Uint32())
totalUsageNanoCores += dummyUsageNanoCores
// Create a dummy value to be used as the total RAM usage.
// The value should fit a uint32 in order to avoid overflows later on when computing pod stats.
/* #nosec */
dummyUsageBytes := uint64(rand.Uint32())
totalUsageBytes += dummyUsageBytes
// Append a ContainerStats object containing the dummy stats to the PodStats object.

View File

@@ -13,7 +13,7 @@ type Store struct {
ls map[string]InitFunc
}
func NewStore() *Store { // nolint:golint
func NewStore() *Store { //nolint:golint
return &Store{
ls: make(map[string]InitFunc),
}
@@ -71,4 +71,4 @@ type InitConfig struct {
ResourceManager *manager.ResourceManager
}
type InitFunc func(InitConfig) (Provider, error) // nolint:golint
type InitFunc func(InitConfig) (Provider, error) //nolint:golint

View File

@@ -7,7 +7,7 @@ const (
OperatingSystemWindows = "windows"
)
type OperatingSystems map[string]bool // nolint:golint
type OperatingSystems map[string]bool //nolint:golint
var (
// ValidOperatingSystems defines the group of operating systems
@@ -18,7 +18,7 @@ var (
}
)
func (o OperatingSystems) Names() []string { // nolint:golint
func (o OperatingSystems) Names() []string { //nolint:golint
keys := make([]string, 0, len(o))
for k := range o {
keys = append(keys, k)

View File

@@ -6,6 +6,7 @@ import (
)
func registerMock(s *provider.Store) {
/* #nosec */
s.Register("mock", func(cfg provider.InitConfig) (provider.Provider, error) { //nolint:errcheck
return mock.NewMockProvider(
cfg.ConfigPath,

116
go.mod
View File

@@ -1,34 +1,100 @@
module github.com/virtual-kubelet/virtual-kubelet
go 1.15
go 1.19
require (
contrib.go.opencensus.io/exporter/jaeger v0.1.0
contrib.go.opencensus.io/exporter/ocagent v0.4.12
github.com/bombsimon/logrusr v1.0.0
github.com/docker/spdystream v0.0.0-20170912183627-bc6354cbbc29 // indirect
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f // indirect
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 // indirect
github.com/google/go-cmp v0.5.2
github.com/gorilla/mux v1.7.3
contrib.go.opencensus.io/exporter/jaeger v0.2.1
contrib.go.opencensus.io/exporter/ocagent v0.7.0
github.com/bombsimon/logrusr/v3 v3.1.0
github.com/google/go-cmp v0.5.9
github.com/gorilla/mux v1.8.0
github.com/mitchellh/go-homedir v1.1.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.7.1
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v1.0.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
go.opencensus.io v0.22.2
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
google.golang.org/api v0.15.1 // indirect
go.opencensus.io v0.24.0
go.opentelemetry.io/otel v1.12.0
go.opentelemetry.io/otel/sdk v1.12.0
go.opentelemetry.io/otel/trace v1.12.0
golang.org/x/sync v0.1.0
golang.org/x/time v0.3.0
gotest.tools v2.2.0+incompatible
k8s.io/api v0.19.10
k8s.io/apimachinery v0.19.10
k8s.io/apiserver v0.19.10
k8s.io/client-go v0.19.10
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.2.0
k8s.io/utils v0.0.0-20200912215256-4140de9c8800
sigs.k8s.io/controller-runtime v0.7.1
k8s.io/api v0.26.1
k8s.io/apimachinery v0.26.1
k8s.io/apiserver v0.26.1
k8s.io/client-go v0.26.1
k8s.io/klog/v2 v2.90.0
k8s.io/kubelet v0.26.1
k8s.io/utils v0.0.0-20230202215443-34013725500c
sigs.k8s.io/controller-runtime v0.14.4
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/prometheus/client_golang v1.14.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.10.0 // indirect
go.opentelemetry.io/otel/metric v0.31.0 // indirect
go.opentelemetry.io/proto/otlp v0.19.0 // indirect
golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
google.golang.org/api v0.43.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.26.1 // indirect
k8s.io/component-base v0.26.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

879
go.sum

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +0,0 @@
package lockdeps
import (
// TODO(Sargun): Remove in Go1.13
// This is a dep that `go mod tidy` keeps removing, because it's a transitive dep that's pulled in via a test
// See: https://github.com/golang/go/issues/29702
_ "github.com/prometheus/client_golang/prometheus"
_ "golang.org/x/sys/unix"
)

View File

@@ -123,7 +123,7 @@ func TestGetConfigMap(t *testing.T) {
}
value := configMap.Data["key-0"]
if value != "val-0" {
t.Fatal("got unexpected value", string(value))
t.Fatal("got unexpected value", value)
}
// Try to get a configmap that does not exist, and make sure we've got a "not found" error as a response.

View File

@@ -4,8 +4,8 @@ import (
"context"
"encoding/json"
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
"k8s.io/apimachinery/pkg/util/net"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
// GetStatsSummary queries the /stats/summary endpoint of the virtual-kubelet and returns the Summary object obtained as a response.

View File

@@ -0,0 +1,206 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package token implements a manager of serviceaccount tokens for pods running
// on the node.
package token
import (
"context"
"errors"
"fmt"
"math/rand"
"sync"
"time"
authenticationv1 "k8s.io/api/authentication/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
)
const (
	// maxTTL caps how long a cached token is served before a refresh is
	// forced, regardless of the requested TTL (see requiresRefresh).
	maxTTL = 24 * time.Hour
	// gcPeriod is how often the background goroutine evicts expired tokens.
	gcPeriod = time.Minute
	// maxJitter bounds the random jitter subtracted from refresh deadlines
	// so that many callers do not refresh at the same instant.
	maxJitter = 10 * time.Second
)
// NewManager returns a new token manager.
//
// Tokens are fetched through the TokenRequest API of c and cached. c may be
// nil (standalone mode), in which case every fetch returns an error; the
// nil check inside getToken runs before any use of c, so NewManager(nil) is
// safe (tests rely on this). NewManager also starts a background goroutine
// that evicts expired cache entries every gcPeriod; it has no stop
// mechanism, so a Manager lives for the life of the process.
func NewManager(c clientset.Interface) *Manager {
	// check whether the server supports token requests so we can give a more helpful error message.
	// The discovery call is performed lazily and at most once (sync.Once),
	// only when a CreateToken call comes back NotFound.
	supported := false
	once := &sync.Once{}
	tokenRequestsSupported := func() bool {
		once.Do(func() {
			resources, err := c.Discovery().ServerResourcesForGroupVersion("v1")
			if err != nil {
				// Discovery failed: leave supported=false so the NotFound
				// path below reports missing TokenRequest support.
				return
			}
			for _, resource := range resources.APIResources {
				if resource.Name == "serviceaccounts/token" {
					supported = true
					return
				}
			}
		})
		return supported
	}
	m := &Manager{
		getToken: func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
			if c == nil {
				return nil, errors.New("cannot use TokenManager when kubelet is in standalone mode")
			}
			tokenRequest, err := c.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{})
			if apierrors.IsNotFound(err) && !tokenRequestsSupported() {
				return nil, fmt.Errorf("the API server does not have TokenRequest endpoints enabled")
			}
			return tokenRequest, err
		},
		cache: make(map[string]*authenticationv1.TokenRequest),
		clock: clock.RealClock{},
	}
	go wait.Forever(m.cleanup, gcPeriod)
	return m
}
// Manager manages service account tokens for pods.
type Manager struct {
	// cacheMutex guards the cache
	cacheMutex sync.RWMutex
	cache      map[string]*authenticationv1.TokenRequest

	// getToken fetches a fresh token from the API server.
	// It is a struct field rather than a method so it can be mocked for testing.
	getToken func(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error)

	// clock supplies "now" for expiry/refresh checks; mocked for testing.
	clock clock.Clock
}
// GetServiceAccountToken gets a service account token for a pod from cache or
// from the TokenRequest API. This process is as follows:
//   - Check the cache for the current token request.
//   - If the token exists and does not require a refresh, return the current token.
//   - Attempt to refresh the token.
//   - If the token is refreshed successfully, save it in the cache and return the token.
//   - If refresh fails and the old token is still valid, log an error and return the old token.
//   - If refresh fails and the old token is no longer valid, return an error.
func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
	key := keyFunc(name, namespace, tr)

	// Fast path: serve from cache while the cached token is still fresh.
	ctr, ok := m.get(key)
	if ok && !m.requiresRefresh(ctr) {
		return ctr, nil
	}

	tr, err := m.getToken(name, namespace, tr)
	if err != nil {
		switch {
		case !ok:
			// No cached token to fall back on.
			// Wrap with %w so callers can errors.Is/As the underlying cause.
			return nil, fmt.Errorf("failed to fetch token: %w", err)
		case m.expired(ctr):
			// A cached token exists but is past expiry; it cannot be served.
			return nil, fmt.Errorf("token %s expired and refresh failed: %w", key, err)
		default:
			// Stale-but-valid token: log the refresh failure and serve it.
			klog.ErrorS(err, "Couldn't update token", "cacheKey", key)
			return ctr, nil
		}
	}

	m.set(key, tr)
	return tr, nil
}
// DeleteServiceAccountToken should be invoked when pod got deleted. It simply
// cleans the token manager cache of every token bound to the given pod UID.
func (m *Manager) DeleteServiceAccountToken(podUID types.UID) {
	m.cacheMutex.Lock()
	defer m.cacheMutex.Unlock()
	for k, tr := range m.cache {
		// Guard against entries cached without a BoundObjectRef: keyFunc
		// explicitly tolerates a nil BoundObjectRef, so such entries can
		// exist in the cache and must not cause a nil-pointer dereference.
		if ref := tr.Spec.BoundObjectRef; ref != nil && ref.UID == podUID {
			delete(m.cache, k)
		}
	}
}
// cleanup evicts every expired token from the cache. It is run every
// gcPeriod by the goroutine started in NewManager.
func (m *Manager) cleanup() {
	m.cacheMutex.Lock()
	defer m.cacheMutex.Unlock()
	for k, tr := range m.cache {
		if m.expired(tr) {
			delete(m.cache, k)
		}
	}
}
// get returns the cached token request for key and whether one was present.
func (m *Manager) get(key string) (*authenticationv1.TokenRequest, bool) {
	m.cacheMutex.RLock()
	defer m.cacheMutex.RUnlock()
	ctr, ok := m.cache[key]
	return ctr, ok
}
// set stores tr in the cache under key, replacing any previous entry.
func (m *Manager) set(key string, tr *authenticationv1.TokenRequest) {
	m.cacheMutex.Lock()
	defer m.cacheMutex.Unlock()
	m.cache[key] = tr
}
// expired reports whether t's expiration timestamp is in the past according
// to the manager's clock.
func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
	return m.clock.Now().After(t.Status.ExpirationTimestamp.Time)
}
// requiresRefresh returns true if the token is older than 80% of its total
// ttl, or if the token is older than 24 hours.
func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
	if tr.Spec.ExpirationSeconds == nil {
		// Without ExpirationSeconds the issue time cannot be reconstructed;
		// log the request (with the token value scrubbed) and never refresh.
		cpy := tr.DeepCopy()
		cpy.Status.Token = ""
		klog.ErrorS(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
		return false
	}
	now := m.clock.Now()
	exp := tr.Status.ExpirationTimestamp.Time
	// Reconstruct the issue time from the expiry minus the requested TTL.
	iat := exp.Add(-1 * time.Duration(*tr.Spec.ExpirationSeconds) * time.Second)

	// Random jitter spreads refreshes out so they do not all fire at once.
	jitter := time.Duration(rand.Float64()*maxJitter.Seconds()) * time.Second
	// Hard cap: any token older than maxTTL (minus jitter) must refresh.
	if now.After(iat.Add(maxTTL - jitter)) {
		return true
	}
	// Require a refresh if within 20% of the TTL plus a jitter from the expiration time.
	if now.After(exp.Add(-1*time.Duration((*tr.Spec.ExpirationSeconds*20)/100)*time.Second - jitter)) {
		return true
	}
	return false
}
// keyFunc derives the cache key for a token request. Keys should be
// nonconfidential and safe to log: they encode only the name, namespace,
// audiences, expiration seconds, and bound-object reference.
func keyFunc(name, namespace string, tr *authenticationv1.TokenRequest) string {
	exp := int64(0)
	if secs := tr.Spec.ExpirationSeconds; secs != nil {
		exp = *secs
	}
	ref := authenticationv1.BoundObjectReference{}
	if bound := tr.Spec.BoundObjectRef; bound != nil {
		ref = *bound
	}
	return fmt.Sprintf("%q/%q/%#v/%#v/%#v", name, namespace, tr.Spec.Audiences, exp, ref)
}

View File

@@ -0,0 +1,606 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package token
import (
"fmt"
"testing"
"time"
authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
testingclock "k8s.io/utils/clock/testing"
)
// TestTokenCachingAndExpiration verifies that tokens are served from cache
// while fresh, refreshed once they approach expiry, and that a failed
// refresh of a still-valid token is tolerated (the old token is returned).
func TestTokenCachingAndExpiration(t *testing.T) {
	type suite struct {
		clock *testingclock.FakeClock
		tg    *fakeTokenGetter
		mgr   *Manager
	}
	cases := []struct {
		name string
		exp  time.Duration
		f    func(t *testing.T, s *suite)
	}{
		{
			name: "rotate hour token expires in the last 12 minutes",
			exp:  time.Hour,
			f: func(t *testing.T, s *suite) {
				s.clock.SetTime(s.clock.Now().Add(50 * time.Minute))
				if _, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest()); err != nil {
					t.Fatalf("unexpected error: %v", err)
				}
				if s.tg.count != 2 {
					t.Fatalf("expected token to be refreshed: call count was %d", s.tg.count)
				}
			},
		},
		{
			name: "rotate 24 hour token that expires in 40 hours",
			exp:  40 * time.Hour,
			f: func(t *testing.T, s *suite) {
				s.clock.SetTime(s.clock.Now().Add(25 * time.Hour))
				if _, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest()); err != nil {
					t.Fatalf("unexpected error: %v", err)
				}
				if s.tg.count != 2 {
					t.Fatalf("expected token to be refreshed: call count was %d", s.tg.count)
				}
			},
		},
		{
			name: "rotate hour token fails, old token is still valid, doesn't error",
			exp:  time.Hour,
			f: func(t *testing.T, s *suite) {
				s.clock.SetTime(s.clock.Now().Add(50 * time.Minute))
				// Swap in a getter that always fails; the cached "foo"
				// token is still valid, so no error is expected.
				tg := &fakeTokenGetter{
					err: fmt.Errorf("err"),
				}
				s.mgr.getToken = tg.getToken
				tr, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest())
				if err != nil {
					t.Fatalf("unexpected error: %v", err)
				}
				if tr.Status.Token != "foo" {
					t.Fatalf("unexpected token: %v", tr.Status.Token)
				}
			},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			clock := testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
			expSecs := int64(c.exp.Seconds())
			s := &suite{
				clock: clock,
				mgr:   NewManager(nil),
				tg: &fakeTokenGetter{
					tr: &authenticationv1.TokenRequest{
						Spec: authenticationv1.TokenRequestSpec{
							ExpirationSeconds: &expSecs,
						},
						Status: authenticationv1.TokenRequestStatus{
							Token:               "foo",
							ExpirationTimestamp: metav1.Time{Time: clock.Now().Add(c.exp)},
						},
					},
				},
			}
			s.mgr.getToken = s.tg.getToken
			s.mgr.clock = s.clock
			// First call populates the cache (one client call)...
			if _, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest()); err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if s.tg.count != 1 {
				t.Fatalf("unexpected client call, got: %d, want: 1", s.tg.count)
			}
			// ...second call must be served from cache (count unchanged).
			if _, err := s.mgr.GetServiceAccountToken("a", "b", getTokenRequest()); err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if s.tg.count != 1 {
				t.Fatalf("expected token to be served from cache: saw %d", s.tg.count)
			}
			c.f(t, s)
		})
	}
}
// TestRequiresRefresh table-tests the refresh heuristic across token ages:
// fresh tokens are kept, tokens past 80% of their TTL or past the 24h cap
// are refreshed, and a nil ExpirationSeconds disables refreshing.
func TestRequiresRefresh(t *testing.T) {
	start := time.Now()
	cases := []struct {
		now, exp      time.Time
		expectRefresh bool
		requestTweaks func(*authenticationv1.TokenRequest)
	}{
		{
			now:           start.Add(10 * time.Minute),
			exp:           start.Add(60 * time.Minute),
			expectRefresh: false,
		},
		{
			now:           start.Add(50 * time.Minute),
			exp:           start.Add(60 * time.Minute),
			expectRefresh: true,
		},
		{
			now:           start.Add(25 * time.Hour),
			exp:           start.Add(60 * time.Hour),
			expectRefresh: true,
		},
		{
			now:           start.Add(70 * time.Minute),
			exp:           start.Add(60 * time.Minute),
			expectRefresh: true,
		},
		{
			// expiry will be overwritten by the tweak below.
			now:           start.Add(0 * time.Minute),
			exp:           start.Add(60 * time.Minute),
			expectRefresh: false,
			requestTweaks: func(tr *authenticationv1.TokenRequest) {
				tr.Spec.ExpirationSeconds = nil
			},
		},
	}
	for i, c := range cases {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			clock := testingclock.NewFakeClock(c.now)
			secs := int64(c.exp.Sub(start).Seconds())
			tr := &authenticationv1.TokenRequest{
				Spec: authenticationv1.TokenRequestSpec{
					ExpirationSeconds: &secs,
				},
				Status: authenticationv1.TokenRequestStatus{
					ExpirationTimestamp: metav1.Time{Time: c.exp},
				},
			}
			if c.requestTweaks != nil {
				c.requestTweaks(tr)
			}
			mgr := NewManager(nil)
			mgr.clock = clock
			rr := mgr.requiresRefresh(tr)
			if rr != c.expectRefresh {
				t.Fatalf("unexpected requiresRefresh result, got: %v, want: %v", rr, c.expectRefresh)
			}
		})
	}
}
// TestDeleteServiceAccountToken populates the cache via a mix of successful
// and failing token requests, deletes tokens by pod UID, and asserts exactly
// the expected entries remain cached.
func TestDeleteServiceAccountToken(t *testing.T) {
	type request struct {
		name, namespace string
		tr              authenticationv1.TokenRequest
		shouldFail      bool
	}
	cases := []struct {
		name         string
		requestIndex []int
		deletePodUID []types.UID
		expLeftIndex []int
	}{
		{
			name:         "delete none with all success requests",
			requestIndex: []int{0, 1, 2},
			expLeftIndex: []int{0, 1, 2},
		},
		{
			name:         "delete one with all success requests",
			requestIndex: []int{0, 1, 2},
			deletePodUID: []types.UID{"fake-uid-1"},
			expLeftIndex: []int{1, 2},
		},
		{
			name:         "delete two with all success requests",
			requestIndex: []int{0, 1, 2},
			deletePodUID: []types.UID{"fake-uid-1", "fake-uid-3"},
			expLeftIndex: []int{1},
		},
		{
			name:         "delete all with all success requests",
			requestIndex: []int{0, 1, 2},
			deletePodUID: []types.UID{"fake-uid-1", "fake-uid-2", "fake-uid-3"},
		},
		{
			name:         "delete no pod with failed requests",
			requestIndex: []int{0, 1, 2, 3},
			deletePodUID: []types.UID{},
			expLeftIndex: []int{0, 1, 2},
		},
		{
			name:         "delete other pod with failed requests",
			requestIndex: []int{0, 1, 2, 3},
			deletePodUID: []types.UID{"fake-uid-2"},
			expLeftIndex: []int{0, 2},
		},
		{
			name:         "delete no pod with request which success after failure",
			requestIndex: []int{0, 1, 2, 3, 4},
			deletePodUID: []types.UID{},
			expLeftIndex: []int{0, 1, 2, 4},
		},
		{
			name:         "delete the pod which success after failure",
			requestIndex: []int{0, 1, 2, 3, 4},
			deletePodUID: []types.UID{"fake-uid-4"},
			expLeftIndex: []int{0, 1, 2},
		},
		{
			name:         "delete other pod with request which success after failure",
			requestIndex: []int{0, 1, 2, 3, 4},
			deletePodUID: []types.UID{"fake-uid-1"},
			expLeftIndex: []int{1, 2, 4},
		},
		{
			name:         "delete some pod not in the set",
			requestIndex: []int{0, 1, 2},
			deletePodUID: []types.UID{"fake-uid-100", "fake-uid-200"},
			expLeftIndex: []int{0, 1, 2},
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			requests := []request{
				{
					name:      "fake-name-1",
					namespace: "fake-namespace-1",
					tr: authenticationv1.TokenRequest{
						Spec: authenticationv1.TokenRequestSpec{
							BoundObjectRef: &authenticationv1.BoundObjectReference{
								UID:  "fake-uid-1",
								Name: "fake-name-1",
							},
						},
					},
					shouldFail: false,
				},
				{
					name:      "fake-name-2",
					namespace: "fake-namespace-2",
					tr: authenticationv1.TokenRequest{
						Spec: authenticationv1.TokenRequestSpec{
							BoundObjectRef: &authenticationv1.BoundObjectReference{
								UID:  "fake-uid-2",
								Name: "fake-name-2",
							},
						},
					},
					shouldFail: false,
				},
				{
					name:      "fake-name-3",
					namespace: "fake-namespace-3",
					tr: authenticationv1.TokenRequest{
						Spec: authenticationv1.TokenRequestSpec{
							BoundObjectRef: &authenticationv1.BoundObjectReference{
								UID:  "fake-uid-3",
								Name: "fake-name-3",
							},
						},
					},
					shouldFail: false,
				},
				{
					name:      "fake-name-4",
					namespace: "fake-namespace-4",
					tr: authenticationv1.TokenRequest{
						Spec: authenticationv1.TokenRequestSpec{
							BoundObjectRef: &authenticationv1.BoundObjectReference{
								UID:  "fake-uid-4",
								Name: "fake-name-4",
							},
						},
					},
					shouldFail: true,
				},
				{
					// exactly the same as the previous request, except it succeeds
					name:      "fake-name-4",
					namespace: "fake-namespace-4",
					tr: authenticationv1.TokenRequest{
						Spec: authenticationv1.TokenRequestSpec{
							BoundObjectRef: &authenticationv1.BoundObjectReference{
								UID:  "fake-uid-4",
								Name: "fake-name-4",
							},
						},
					},
					shouldFail: false,
				},
			}
			testMgr := NewManager(nil)
			testMgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
			successGetToken := func(_, _ string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
				tr.Status = authenticationv1.TokenRequestStatus{
					ExpirationTimestamp: metav1.Time{Time: testMgr.clock.Now().Add(10 * time.Hour)},
				}
				return tr, nil
			}
			failGetToken := func(_, _ string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
				return nil, fmt.Errorf("fail tr")
			}
			for _, index := range c.requestIndex {
				req := requests[index]
				if req.shouldFail {
					testMgr.getToken = failGetToken
				} else {
					testMgr.getToken = successGetToken
				}
				// Populate the cache; errors are intentionally ignored —
				// the failing requests are part of the table.
				testMgr.GetServiceAccountToken(req.namespace, req.name, &req.tr)
			}
			for _, uid := range c.deletePodUID {
				testMgr.DeleteServiceAccountToken(uid)
			}
			if len(c.expLeftIndex) != len(testMgr.cache) {
				t.Errorf("%s got unexpected result: expected left cache size is %d, got %d", c.name, len(c.expLeftIndex), len(testMgr.cache))
			}
			for _, leftIndex := range c.expLeftIndex {
				r := requests[leftIndex]
				_, ok := testMgr.get(keyFunc(r.name, r.namespace, &r.tr))
				if !ok {
					t.Errorf("%s got unexpected result: expected token request %v exist in cache, but not", c.name, r)
				}
			}
		})
	}
}
// fakeTokenGetter is a test double for Manager.getToken that counts calls
// and returns a canned token request or error.
type fakeTokenGetter struct {
	count int // number of times getToken has been invoked
	tr    *authenticationv1.TokenRequest
	err   error
}
// getToken returns the canned response, recording the call.
func (ftg *fakeTokenGetter) getToken(name, namespace string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
	ftg.count++
	return ftg.tr, ftg.err
}
// TestCleanup checks that the periodic cleanup pass evicts expired tokens
// and keeps unexpired ones.
//
// NOTE(review): the case names appear swapped relative to their data — a
// relativeExp of -1h produces an already-expired token (cleaned, size 0),
// while +1h produces an unexpired one (kept, size 1). The asserted behavior
// is correct either way; consider renaming the cases — TODO confirm.
func TestCleanup(t *testing.T) {
	cases := []struct {
		name              string
		relativeExp       time.Duration
		expectedCacheSize int
	}{
		{
			name:              "don't cleanup unexpired tokens",
			relativeExp:       -1 * time.Hour,
			expectedCacheSize: 0,
		},
		{
			name:              "cleanup expired tokens",
			relativeExp:       time.Hour,
			expectedCacheSize: 1,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			clock := testingclock.NewFakeClock(time.Time{}.Add(24 * time.Hour))
			mgr := NewManager(nil)
			mgr.clock = clock
			mgr.set("key", &authenticationv1.TokenRequest{
				Status: authenticationv1.TokenRequestStatus{
					ExpirationTimestamp: metav1.Time{Time: mgr.clock.Now().Add(c.relativeExp)},
				},
			})
			mgr.cleanup()
			if got, want := len(mgr.cache), c.expectedCacheSize; got != want {
				t.Fatalf("unexpected number of cache entries after cleanup, got: %d, want: %d", got, want)
			}
		})
	}
}
func TestKeyFunc(t *testing.T) {
type tokenRequestUnit struct {
name string
namespace string
tr *authenticationv1.TokenRequest
}
getKeyFunc := func(u tokenRequestUnit) string {
return keyFunc(u.name, u.namespace, u.tr)
}
cases := []struct {
name string
trus []tokenRequestUnit
target tokenRequestUnit
shouldHit bool
}{
{
name: "hit",
trus: []tokenRequestUnit{
{
name: "foo-sa",
namespace: "foo-ns",
tr: &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"foo1", "foo2"},
ExpirationSeconds: getInt64Point(2000),
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "pod",
Name: "foo-pod",
UID: "foo-uid",
},
},
},
},
{
name: "ame-sa",
namespace: "ame-ns",
tr: &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"ame1", "ame2"},
ExpirationSeconds: getInt64Point(2000),
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "pod",
Name: "ame-pod",
UID: "ame-uid",
},
},
},
},
},
target: tokenRequestUnit{
name: "foo-sa",
namespace: "foo-ns",
tr: &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"foo1", "foo2"},
ExpirationSeconds: getInt64Point(2000),
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "pod",
Name: "foo-pod",
UID: "foo-uid",
},
},
},
},
shouldHit: true,
},
{
name: "not hit due to different ExpirationSeconds",
trus: []tokenRequestUnit{
{
name: "foo-sa",
namespace: "foo-ns",
tr: &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"foo1", "foo2"},
ExpirationSeconds: getInt64Point(2000),
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "pod",
Name: "foo-pod",
UID: "foo-uid",
},
},
},
},
},
target: tokenRequestUnit{
name: "foo-sa",
namespace: "foo-ns",
tr: &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"foo1", "foo2"},
//everthing is same besides ExpirationSeconds
ExpirationSeconds: getInt64Point(2001),
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "pod",
Name: "foo-pod",
UID: "foo-uid",
},
},
},
},
shouldHit: false,
},
{
name: "not hit due to different BoundObjectRef",
trus: []tokenRequestUnit{
{
name: "foo-sa",
namespace: "foo-ns",
tr: &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"foo1", "foo2"},
ExpirationSeconds: getInt64Point(2000),
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "pod",
Name: "foo-pod",
UID: "foo-uid",
},
},
},
},
},
target: tokenRequestUnit{
name: "foo-sa",
namespace: "foo-ns",
tr: &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"foo1", "foo2"},
ExpirationSeconds: getInt64Point(2000),
BoundObjectRef: &authenticationv1.BoundObjectReference{
Kind: "pod",
//everthing is same besides BoundObjectRef.Name
Name: "diff-pod",
UID: "foo-uid",
},
},
},
},
shouldHit: false,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
mgr := NewManager(nil)
mgr.clock = testingclock.NewFakeClock(time.Time{}.Add(30 * 24 * time.Hour))
for _, tru := range c.trus {
mgr.set(getKeyFunc(tru), &authenticationv1.TokenRequest{
Status: authenticationv1.TokenRequestStatus{
// make sure the cached token is not removed by the token manager's cleanup routine
ExpirationTimestamp: metav1.Time{Time: mgr.clock.Now().Add(50 * time.Minute)},
},
})
}
_, hit := mgr.get(getKeyFunc(c.target))
if hit != c.shouldHit {
t.Errorf("%s got unexpected hit result: expected to be %t, got %t", c.name, c.shouldHit, hit)
}
})
}
}
// getTokenRequest builds the canonical TokenRequest fixture used by these
// tests: audiences "foo1"/"foo2", a 2000-second expiry, and a bound object
// reference to pod "foo-pod".
func getTokenRequest() *authenticationv1.TokenRequest {
	boundRef := &authenticationv1.BoundObjectReference{
		Kind: "pod",
		Name: "foo-pod",
		UID:  "foo-uid",
	}
	spec := authenticationv1.TokenRequestSpec{
		Audiences:         []string{"foo1", "foo2"},
		ExpirationSeconds: getInt64Point(2000),
		BoundObjectRef:    boundRef,
	}
	return &authenticationv1.TokenRequest{Spec: spec}
}
// getInt64Point returns a pointer to a fresh copy of v.
func getInt64Point(v int64) *int64 {
	p := v
	return &p
}

View File

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

View File

@@ -33,7 +33,9 @@ func handleError(f handlerFunc) http.HandlerFunc {
code := httpStatusCode(err)
w.WriteHeader(code)
io.WriteString(w, err.Error()) //nolint:errcheck
if _, err := io.WriteString(w, err.Error()); err != nil {
log.G(req.Context()).WithError(err).Error("error writing error response")
}
logger := log.G(req.Context()).WithError(err).WithField("httpStatusCode", code)
if code >= 500 {

View File

@@ -24,14 +24,15 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
)
type PodListerFunc func(context.Context) ([]*v1.Pod, error) // nolint:golint
type PodListerFunc func(context.Context) ([]*v1.Pod, error) //nolint:golint
func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc { // nolint:golint
func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc { //nolint:golint
if getPods == nil {
return NotImplemented
}
scheme := runtime.NewScheme()
/* #nosec */
v1.SchemeBuilder.AddToScheme(scheme) //nolint:errcheck
codecs := serializer.NewCodecFactory(scheme)

View File

@@ -33,7 +33,7 @@ type ServeMux interface {
Handle(path string, h http.Handler)
}
type PodHandlerConfig struct { // nolint:golint
type PodHandlerConfig struct { //nolint:golint
RunInContainer ContainerExecHandlerFunc
GetContainerLogs ContainerLogsHandlerFunc
// GetPods is meant to enumerate the pods that the provider knows about
@@ -79,7 +79,7 @@ func PodHandler(p PodHandlerConfig, debug bool) http.Handler {
// PodStatsSummaryHandler creates an http handler for serving pod metrics.
//
// If the passed in handler func is nil this will create handlers which only
// serves http.StatusNotImplemented
// serves http.StatusNotImplemented
func PodStatsSummaryHandler(f PodStatsSummaryHandlerFunc) http.Handler {
if f == nil {
return http.HandlerFunc(NotImplemented)

View File

@@ -20,11 +20,11 @@ import (
"net/http"
"github.com/pkg/errors"
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
// PodStatsSummaryHandlerFunc defines the handler for getting pod stats summaries
type PodStatsSummaryHandlerFunc func(context.Context) (*statsv1alpha1.Summary, error)
type PodStatsSummaryHandlerFunc func(context.Context) (*stats.Summary, error)
// HandlePodStatsSummary makes an HTTP handler for implementing the kubelet summary stats endpoint
func HandlePodStatsSummary(h PodStatsSummaryHandlerFunc) http.HandlerFunc {

View File

@@ -1,7 +0,0 @@
These types are copied from the [k8s.io/kubelet](https://pkg.go.dev/k8s.io/kubelet@v0.21.0/pkg/apis/stats/v1alpha1) module.
They are used from a type alias in the API package.
It is being used this way because the module is only available from 1.20 and on, but currently we are pinned to v1.19 and plan to continue to support v1.19 for some time.
Likewise we want to stop importing k8s.io/kubernetes (where the older type def is) since this transatively imports all of kubernetes.
After the min version is v1.20 we can update the type alias to point the the module and remove these type definitions.

View File

@@ -1,345 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package statsv1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Summary is a top-level container for holding NodeStats and PodStats.
type Summary struct {
// Overall node stats.
Node NodeStats `json:"node"`
// Per-pod stats.
Pods []PodStats `json:"pods"`
}
// NodeStats holds node-level unprocessed sample stats.
type NodeStats struct {
// Reference to the measured Node.
NodeName string `json:"nodeName"`
// Stats of system daemons tracked as raw containers.
// The system containers are named according to the SystemContainer* constants.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
SystemContainers []ContainerStats `json:"systemContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
// The time at which data collection for the node-scoped (i.e. aggregate) stats was (re)started.
StartTime metav1.Time `json:"startTime"`
// Stats pertaining to CPU resources.
// +optional
CPU *CPUStats `json:"cpu,omitempty"`
// Stats pertaining to memory (RAM) resources.
// +optional
Memory *MemoryStats `json:"memory,omitempty"`
// Stats pertaining to network resources.
// +optional
Network *NetworkStats `json:"network,omitempty"`
// Stats pertaining to total usage of filesystem resources on the rootfs used by node k8s components.
// NodeFs.Used is the total bytes used on the filesystem.
// +optional
Fs *FsStats `json:"fs,omitempty"`
// Stats about the underlying container runtime.
// +optional
Runtime *RuntimeStats `json:"runtime,omitempty"`
// Stats about the rlimit of system.
// +optional
Rlimit *RlimitStats `json:"rlimit,omitempty"`
}
// RlimitStats are stats rlimit of OS.
type RlimitStats struct {
Time metav1.Time `json:"time"`
// The max PID of OS.
MaxPID *int64 `json:"maxpid,omitempty"`
// The number of running process in the OS.
NumOfRunningProcesses *int64 `json:"curproc,omitempty"`
}
// RuntimeStats are stats pertaining to the underlying container runtime.
type RuntimeStats struct {
// Stats about the underlying filesystem where container images are stored.
// This filesystem could be the same as the primary (root) filesystem.
// Usage here refers to the total number of bytes occupied by images on the filesystem.
// +optional
ImageFs *FsStats `json:"imageFs,omitempty"`
}
const (
// SystemContainerKubelet is the container name for the system container tracking Kubelet usage.
SystemContainerKubelet = "kubelet"
// SystemContainerRuntime is the container name for the system container tracking the runtime (e.g. docker) usage.
SystemContainerRuntime = "runtime"
// SystemContainerMisc is the container name for the system container tracking non-kubernetes processes.
SystemContainerMisc = "misc"
// SystemContainerPods is the container name for the system container tracking user pods.
SystemContainerPods = "pods"
)
// ProcessStats are stats pertaining to processes.
type ProcessStats struct {
// Number of processes
// +optional
ProcessCount *uint64 `json:"process_count,omitempty"`
}
// PodStats holds pod-level unprocessed sample stats.
type PodStats struct {
// Reference to the measured Pod.
PodRef PodReference `json:"podRef"`
// The time at which data collection for the pod-scoped (e.g. network) stats was (re)started.
StartTime metav1.Time `json:"startTime"`
// Stats of containers in the measured pod.
// +patchMergeKey=name
// +patchStrategy=merge
Containers []ContainerStats `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
// Stats pertaining to CPU resources consumed by pod cgroup (which includes all containers' resource usage and pod overhead).
// +optional
CPU *CPUStats `json:"cpu,omitempty"`
// Stats pertaining to memory (RAM) resources consumed by pod cgroup (which includes all containers' resource usage and pod overhead).
// +optional
Memory *MemoryStats `json:"memory,omitempty"`
// Stats pertaining to network resources.
// +optional
Network *NetworkStats `json:"network,omitempty"`
// Stats pertaining to volume usage of filesystem resources.
// VolumeStats.UsedBytes is the number of bytes used by the Volume
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
VolumeStats []VolumeStats `json:"volume,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
// EphemeralStorage reports the total filesystem usage for the containers and emptyDir-backed volumes in the measured Pod.
// +optional
EphemeralStorage *FsStats `json:"ephemeral-storage,omitempty"`
// ProcessStats pertaining to processes.
// +optional
ProcessStats *ProcessStats `json:"process_stats,omitempty"`
}
// ContainerStats holds container-level unprocessed sample stats.
type ContainerStats struct {
// Reference to the measured container.
Name string `json:"name"`
// The time at which data collection for this container was (re)started.
StartTime metav1.Time `json:"startTime"`
// Stats pertaining to CPU resources.
// +optional
CPU *CPUStats `json:"cpu,omitempty"`
// Stats pertaining to memory (RAM) resources.
// +optional
Memory *MemoryStats `json:"memory,omitempty"`
// Metrics for Accelerators. Each Accelerator corresponds to one element in the array.
Accelerators []AcceleratorStats `json:"accelerators,omitempty"`
// Stats pertaining to container rootfs usage of filesystem resources.
// Rootfs.UsedBytes is the number of bytes used for the container write layer.
// +optional
Rootfs *FsStats `json:"rootfs,omitempty"`
// Stats pertaining to container logs usage of filesystem resources.
// Logs.UsedBytes is the number of bytes used for the container logs.
// +optional
Logs *FsStats `json:"logs,omitempty"`
// User defined metrics that are exposed by containers in the pod. Typically, we expect only one container in the pod to be exposing user defined metrics. In the event of multiple containers exposing metrics, they will be combined here.
// +patchMergeKey=name
// +patchStrategy=merge
UserDefinedMetrics []UserDefinedMetric `json:"userDefinedMetrics,omitempty" patchStrategy:"merge" patchMergeKey:"name"`
}
// PodReference contains enough information to locate the referenced pod.
type PodReference struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
UID string `json:"uid"`
}
// InterfaceStats contains resource value data about interface.
type InterfaceStats struct {
// The name of the interface
Name string `json:"name"`
// Cumulative count of bytes received.
// +optional
RxBytes *uint64 `json:"rxBytes,omitempty"`
// Cumulative count of receive errors encountered.
// +optional
RxErrors *uint64 `json:"rxErrors,omitempty"`
// Cumulative count of bytes transmitted.
// +optional
TxBytes *uint64 `json:"txBytes,omitempty"`
// Cumulative count of transmit errors encountered.
// +optional
TxErrors *uint64 `json:"txErrors,omitempty"`
}
// NetworkStats contains data about network resources.
type NetworkStats struct {
// The time at which these stats were updated.
Time metav1.Time `json:"time"`
// Stats for the default interface, if found
InterfaceStats `json:",inline"`
Interfaces []InterfaceStats `json:"interfaces,omitempty"`
}
// CPUStats contains data about CPU usage.
type CPUStats struct {
// The time at which these stats were updated.
Time metav1.Time `json:"time"`
// Total CPU usage (sum of all cores) averaged over the sample window.
// The "core" unit can be interpreted as CPU core-nanoseconds per second.
// +optional
UsageNanoCores *uint64 `json:"usageNanoCores,omitempty"`
// Cumulative CPU usage (sum of all cores) since object creation.
// +optional
UsageCoreNanoSeconds *uint64 `json:"usageCoreNanoSeconds,omitempty"`
}
// MemoryStats contains data about memory usage.
type MemoryStats struct {
// The time at which these stats were updated.
Time metav1.Time `json:"time"`
// Available memory for use. This is defined as the memory limit - workingSetBytes.
// If memory limit is undefined, the available bytes is omitted.
// +optional
AvailableBytes *uint64 `json:"availableBytes,omitempty"`
// Total memory in use. This includes all memory regardless of when it was accessed.
// +optional
UsageBytes *uint64 `json:"usageBytes,omitempty"`
// The amount of working set memory. This includes recently accessed memory,
// dirty memory, and kernel memory. WorkingSetBytes is <= UsageBytes
// +optional
WorkingSetBytes *uint64 `json:"workingSetBytes,omitempty"`
// The amount of anonymous and swap cache memory (includes transparent
// hugepages).
// +optional
RSSBytes *uint64 `json:"rssBytes,omitempty"`
// Cumulative number of minor page faults.
// +optional
PageFaults *uint64 `json:"pageFaults,omitempty"`
// Cumulative number of major page faults.
// +optional
MajorPageFaults *uint64 `json:"majorPageFaults,omitempty"`
}
// AcceleratorStats contains stats for accelerators attached to the container.
type AcceleratorStats struct {
// Make of the accelerator (nvidia, amd, google etc.)
Make string `json:"make"`
// Model of the accelerator (tesla-p100, tesla-k80 etc.)
Model string `json:"model"`
// ID of the accelerator.
ID string `json:"id"`
// Total accelerator memory.
// unit: bytes
MemoryTotal uint64 `json:"memoryTotal"`
// Total accelerator memory allocated.
// unit: bytes
MemoryUsed uint64 `json:"memoryUsed"`
// Percent of time over the past sample period (10s) during which
// the accelerator was actively processing.
DutyCycle uint64 `json:"dutyCycle"`
}
// VolumeStats contains data about Volume filesystem usage.
type VolumeStats struct {
// Embedded FsStats
FsStats `json:",inline"`
// Name is the name given to the Volume
// +optional
Name string `json:"name,omitempty"`
// Reference to the PVC, if one exists
// +optional
PVCRef *PVCReference `json:"pvcRef,omitempty"`
}
// PVCReference contains enough information to describe the referenced PVC.
type PVCReference struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
}
// FsStats contains data about filesystem usage.
type FsStats struct {
// The time at which these stats were updated.
Time metav1.Time `json:"time"`
// AvailableBytes represents the storage space available (bytes) for the filesystem.
// +optional
AvailableBytes *uint64 `json:"availableBytes,omitempty"`
// CapacityBytes represents the total capacity (bytes) of the filesystems underlying storage.
// +optional
CapacityBytes *uint64 `json:"capacityBytes,omitempty"`
// UsedBytes represents the bytes used for a specific task on the filesystem.
// This may differ from the total bytes used on the filesystem and may not equal CapacityBytes - AvailableBytes.
// e.g. For ContainerStats.Rootfs this is the bytes used by the container rootfs on the filesystem.
// +optional
UsedBytes *uint64 `json:"usedBytes,omitempty"`
// InodesFree represents the free inodes in the filesystem.
// +optional
InodesFree *uint64 `json:"inodesFree,omitempty"`
// Inodes represents the total inodes in the filesystem.
// +optional
Inodes *uint64 `json:"inodes,omitempty"`
// InodesUsed represents the inodes used by the filesystem
// This may not equal Inodes - InodesFree because this filesystem may share inodes with other "filesystems"
// e.g. For ContainerStats.Rootfs, this is the inodes used only by that container, and does not count inodes used by other containers.
InodesUsed *uint64 `json:"inodesUsed,omitempty"`
}
// UserDefinedMetricType defines how the metric should be interpreted by the user.
type UserDefinedMetricType string
const (
// MetricGauge is an instantaneous value. May increase or decrease.
MetricGauge UserDefinedMetricType = "gauge"
// MetricCumulative is a counter-like value that is only expected to increase.
MetricCumulative UserDefinedMetricType = "cumulative"
// MetricDelta is a rate over a time period.
MetricDelta UserDefinedMetricType = "delta"
)
// UserDefinedMetricDescriptor contains metadata that describes a user defined metric.
type UserDefinedMetricDescriptor struct {
// The name of the metric.
Name string `json:"name"`
// Type of the metric.
Type UserDefinedMetricType `json:"type"`
// Display Units for the stats.
Units string `json:"units"`
// Metadata labels associated with this metric.
// +optional
Labels map[string]string `json:"labels,omitempty"`
}
// UserDefinedMetric represents a metric defined and generated by users.
type UserDefinedMetric struct {
UserDefinedMetricDescriptor `json:",inline"`
// The time at which these stats were updated.
Time metav1.Time `json:"time"`
// Value of the metric. Float64s have 53 bit precision.
// We do not foresee any metrics exceeding that value.
Value float64 `json:"value"`
}

View File

@@ -7,7 +7,7 @@ import (
"testing"
"time"
"github.com/bombsimon/logrusr"
"github.com/bombsimon/logrusr/v3"
"github.com/sirupsen/logrus"
"github.com/virtual-kubelet/virtual-kubelet/log"
logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus"
@@ -18,7 +18,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
klogv2 "k8s.io/klog/v2"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
@@ -65,7 +65,7 @@ func wrapE2ETest(ctx context.Context, env *envtest.Environment, f func(context.C
// The following requires that E2E tests are performed *sequentially*
log.L = logger
klogv2.SetLogger(logrusr.NewLogger(sl))
klog.SetLogger(logrusr.New(sl))
f(ctx, t, env)
}
}

View File

@@ -333,7 +333,7 @@ func (e *nodeNotReadyError) Is(target error) bool {
return ok
}
func (e *nodeNotReadyError) As(target error) bool {
func (e *nodeNotReadyError) As(target interface{}) bool {
val, ok := target.(*nodeNotReadyError)
if ok {
*val = *e

View File

@@ -28,7 +28,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
watchutils "k8s.io/client-go/tools/watch"
"k8s.io/klog"
"k8s.io/klog/v2"
)
var (
@@ -38,7 +38,7 @@ var (
const (
// There might be a constant we can already leverage here
testNamespace = "default"
informerResyncPeriod = time.Duration(1 * time.Second)
informerResyncPeriod = 1 * time.Second
testNodeName = "testnode"
podSyncWorkers = 3
)
@@ -232,7 +232,7 @@ type system struct {
}
func (s *system) start(ctx context.Context) error {
go s.pc.Run(ctx, podSyncWorkers) // nolint:errcheck
go s.pc.Run(ctx, podSyncWorkers) //nolint:errcheck
select {
case <-s.pc.Ready():
case <-s.pc.Done():

View File

@@ -6,14 +6,14 @@ import (
"os"
"testing"
klogv1 "k8s.io/klog"
"k8s.io/klog/v2"
)
var enableEnvTest = flag.Bool("envtest", false, "Enable envtest based tests")
func TestMain(m *testing.M) {
flagset := flag.NewFlagSet("klog", flag.PanicOnError)
klogv1.InitFlags(flagset)
klog.InitFlags(flagset)
flagset.VisitAll(func(f *flag.Flag) {
flag.Var(f.Value, "klog."+f.Name, f.Usage)
})

View File

@@ -54,7 +54,7 @@ var (
//
// Note: Implementers can choose to manage a node themselves, in which case
// it is not needed to provide an implementation for this interface.
type NodeProvider interface { // nolint:golint
type NodeProvider interface { //nolint:revive
// Ping checks if the node is still active.
// This is intended to be lightweight as it will be called periodically as a
// heartbeat to keep the node marked as ready in Kubernetes.
@@ -105,7 +105,7 @@ func NewNodeController(p NodeProvider, node *corev1.Node, nodes v1.NodeInterface
}
// NodeControllerOpt are the functional options used for configuring a node
type NodeControllerOpt func(*NodeController) error // nolint:golint
type NodeControllerOpt func(*NodeController) error //nolint:revive
// WithNodeEnableLeaseV1 enables support for v1 leases.
// V1 Leases share all the same properties as v1beta1 leases, except they do not fallback like
@@ -208,7 +208,7 @@ type ErrorHandler func(context.Context, error) error
// NodeController deals with creating and managing a node object in Kubernetes.
// It can register a node with Kubernetes and periodically update its status.
// NodeController manages a single node entity.
type NodeController struct { // nolint:golint
type NodeController struct { //nolint:revive
p NodeProvider
// serverNode must be updated each time it is updated in API Server
@@ -685,10 +685,9 @@ func (t taintsStringer) String() string {
func addNodeAttributes(ctx context.Context, span trace.Span, n *corev1.Node) context.Context {
return span.WithFields(ctx, log.Fields{
"node.UID": string(n.UID),
"node.name": n.Name,
"node.cluster": n.ClusterName,
"node.taints": taintsStringer(n.Spec.Taints),
"node.UID": string(n.UID),
"node.name": n.Name,
"node.taints": taintsStringer(n.Spec.Taints),
})
}

View File

@@ -71,7 +71,7 @@ func testNodeRun(t *testing.T, enableLease bool) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
@@ -189,7 +189,7 @@ func TestNodeCustomUpdateStatusErrorHandler(t *testing.T) {
)
assert.NilError(t, err)
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
timer := time.NewTimer(10 * time.Second)
defer timer.Stop()
@@ -295,7 +295,7 @@ func TestPingAfterStatusUpdate(t *testing.T) {
node, err := NewNodeController(testP, testNode, nodes, opts...)
assert.NilError(t, err)
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
defer func() {
cancel()
<-node.Done()
@@ -363,7 +363,7 @@ func TestBeforeAnnotationsPreserved(t *testing.T) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
@@ -427,7 +427,7 @@ func TestManualConditionsPreserved(t *testing.T) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()

View File

@@ -33,7 +33,7 @@ type authWrapper struct {
// InstrumentAuth wraps the provided Auth in a new instrumented Auth
//
// Note: You would only need this if you rolled your own auth.
// The Auth implementations defined in this package are already instrumented.
// The Auth implementations defined in this package are already instrumented.
func InstrumentAuth(auth Auth) Auth {
if _, ok := auth.(*authWrapper); ok {
// This is already instrumented
@@ -130,8 +130,8 @@ func WebhookAuth(client kubernetes.Interface, nodeName string, opts ...WebhookAu
}
}
cfg.AuthnConfig.TokenAccessReviewClient = client.AuthenticationV1().TokenReviews()
cfg.AuthzConfig.SubjectAccessReviewClient = client.AuthorizationV1().SubjectAccessReviews()
cfg.AuthnConfig.TokenAccessReviewClient = client.AuthenticationV1()
cfg.AuthzConfig.SubjectAccessReviewClient = client.AuthorizationV1()
authn, _, err := cfg.AuthnConfig.New()
if err != nil {

View File

@@ -78,12 +78,14 @@ func (n *Node) runHTTP(ctx context.Context) (func(), error) {
log.G(ctx).Debug("Started TLS listener")
srv := &http.Server{Handler: n.h, TLSConfig: n.tlsConfig}
srv := &http.Server{Handler: n.h, TLSConfig: n.tlsConfig, ReadHeaderTimeout: 30 * time.Second}
go srv.Serve(l) //nolint:errcheck
log.G(ctx).Debug("HTTP server running")
return func() {
/* #nosec */
srv.Close()
/* #nosec */
l.Close()
}, nil
}
@@ -275,7 +277,6 @@ func WithClient(c kubernetes.Interface) NodeOpt {
// Some basic values are set for node status, you'll almost certainly want to modify it.
//
// If client is nil, this will construct a client using ClientsetFromEnv
//
// It is up to the caller to configure auth on the HTTP handler.
func NewNode(name string, newProvider NewProviderFunc, opts ...NodeOpt) (*Node, error) {
cfg := NodeConfig{
@@ -305,6 +306,8 @@ func NewNode(name string, newProvider NewProviderFunc, opts ...NodeOpt) (*Node,
},
}
cfg.Client = defaultClientFromEnv(cfg.KubeconfigPath)
for _, o := range opts {
if err := o(&cfg); err != nil {
return nil, err
@@ -316,11 +319,7 @@ func NewNode(name string, newProvider NewProviderFunc, opts ...NodeOpt) (*Node,
}
if cfg.Client == nil {
var err error
cfg.Client, err = ClientsetFromEnv(cfg.KubeconfigPath)
if err != nil {
return nil, errors.Wrap(err, "error creating clientset from env")
}
return nil, errors.New("no client provided")
}
podInformerFactory := informers.NewSharedInformerFactoryWithOptions(
@@ -428,3 +427,12 @@ func setNodeReady(n *v1.Node) {
return
}
}
func defaultClientFromEnv(kubeconfigPath string) kubernetes.Interface {
client, err := ClientsetFromEnv(kubeconfigPath)
if err != nil {
log.G(context.TODO()).WithError(err).
Warn("Failed to create clientset from env. Ignore this error If you use your own client")
}
return client
}

View File

@@ -6,10 +6,10 @@ import (
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
corev1listers "k8s.io/client-go/listers/core/v1"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
// Provider contains the methods required to implement a virtual-kubelet provider.
@@ -28,7 +28,7 @@ type Provider interface {
RunInContainer(ctx context.Context, namespace, podName, containerName string, cmd []string, attach api.AttachIO) error
// GetStatsSummary gets the stats for the node, including running pods
GetStatsSummary(context.Context) (*statsv1alpha1.Summary, error)
GetStatsSummary(context.Context) (*stats.Summary, error)
}
// ProviderConfig holds objects created by NewNodeFromClient that a provider may need to bootstrap itself.

View File

@@ -4,7 +4,7 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
)
// WithTLSConfig returns a NodeOpt which creates a base TLSConfig with the default cipher suites and tls min verions.
@@ -31,7 +31,7 @@ func WithTLSConfig(opts ...func(*tls.Config) error) NodeOpt {
// WithCAFromPath makes a TLS config option to set up client auth using the path to a PEM encoded CA cert.
func WithCAFromPath(p string) func(*tls.Config) error {
return func(cfg *tls.Config) error {
pem, err := ioutil.ReadFile(p)
pem, err := os.ReadFile(p)
if err != nil {
return fmt.Errorf("error reading ca cert pem: %w", err)
}

View File

@@ -360,7 +360,7 @@ func TestReCreatePodRace(t *testing.T) {
return true, nil, errors.NewConflict(schema.GroupResource{Group: "", Resource: "pods"}, "nginx", fmt.Errorf("test conflict"))
})
c.client.AddReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
name := action.(core.DeleteAction).GetName()
name := action.(core.GetAction).GetName()
t.Logf("get pod %s", name)
return true, podCopy, nil
})
@@ -394,7 +394,7 @@ func TestReCreatePodRace(t *testing.T) {
})
c.client.AddReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
name := action.(core.DeleteAction).GetName()
name := action.(core.GetAction).GetName()
t.Logf("get pod %s", name)
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "pods"}, "nginx")
})
@@ -430,7 +430,7 @@ func TestReCreatePodRace(t *testing.T) {
})
c.client.AddReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
name := action.(core.DeleteAction).GetName()
name := action.(core.GetAction).GetName()
t.Logf("get pod %s", name)
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "pods"}, "nginx")
})

View File

@@ -7,10 +7,10 @@ import (
"time"
"github.com/virtual-kubelet/virtual-kubelet/internal/podutils"
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
"gotest.tools/assert"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
const (
@@ -403,7 +403,7 @@ func (ts *EndToEndTestSuite) TestCreatePodWithMandatoryInexistentConfigMap(t *te
// It returns an error if the specified pod is not found.
func findPodInPodStats(summary *stats.Summary, pod *v1.Pod) (int, error) {
for i, p := range summary.Pods {
if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name && string(p.PodRef.UID) == string(pod.UID) {
if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name && p.PodRef.UID == string(pod.UID) {
return i, nil
}
}

View File

@@ -0,0 +1,266 @@
// Copyright © 2022 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package opentelemetry implements a github.com/virtual-kubelet/virtual-kubelet/trace.Tracer
// using openTelemetry as a backend.
//
// Use this by setting `trace.T = Adapter{}`
//
// For customizing trace provider used in Adapter, set trace provider by
// `otel.SetTracerProvider(*sdktrace.TracerProvider)`. Examples of customize are setting service name,
// use your own exporter (e.g. jaeger, otlp, prometheus, zipkin, and stdout) etc. Do not forget
// to call TracerProvider.Shutdown() when you create your TracerProvider to avoid memory leak.
package opentelemetry
import (
"context"
"fmt"
"sync"
"time"
"go.opentelemetry.io/otel/codes"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/trace"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
ot "go.opentelemetry.io/otel/trace"
)
// logLevel labels the severity of a log event that is mirrored onto a span.
type logLevel string

const (
	lDebug logLevel = "DEBUG"
	lInfo  logLevel = "INFO"
	lWarn  logLevel = "WARN"
	lErr   logLevel = "ERROR"
	lFatal logLevel = "FATAL"
)

// Adapter implements the trace.Tracer interface for openTelemetry.
type Adapter struct{}

// StartSpan creates a new span from openTelemetry using the given name.
// name is used both as the tracer name and the span name; the returned
// context carries a logger whose output is mirrored onto the span as events.
func (Adapter) StartSpan(ctx context.Context, name string) (context.Context, trace.Span) {
	ctx, ots := otel.Tracer(name).Start(ctx, name)
	// Tag every log line produced under this span with the method name.
	l := log.G(ctx).WithField("method", name)
	s := &span{s: ots, l: l}
	ctx = log.WithLogger(ctx, s.Logger())
	return ctx, s
}
// span adapts an openTelemetry span to the virtual-kubelet trace.Span
// interface, pairing it with a logger so that log output is also recorded
// as span events.
type span struct {
	// mu guards l, which is replaced by WithField/WithFields.
	mu sync.Mutex
	s  ot.Span
	l  log.Logger
}

// End completes the underlying openTelemetry span.
func (s *span) End() {
	s.s.End()
}
// SetStatus records the span's final status: Ok when err is nil, otherwise
// Error carrying the error's message. It is a no-op once the span has
// stopped recording.
func (s *span) SetStatus(err error) {
	if !s.s.IsRecording() {
		return
	}
	code, desc := codes.Ok, ""
	if err != nil {
		code, desc = codes.Error, err.Error()
	}
	s.s.SetStatus(code, desc)
}
// WithField returns a context whose logger carries key/val and, when the
// span is recording, records the pair as a span attribute as well.
func (s *span) WithField(ctx context.Context, key string, val interface{}) context.Context {
	s.mu.Lock()
	// Replace the stored logger so later span-scoped log calls inherit the field.
	s.l = s.l.WithField(key, val)
	ctx = log.WithLogger(ctx, &logger{s: s.s, l: s.l})
	s.mu.Unlock()
	if s.s.IsRecording() {
		s.s.SetAttributes(makeAttribute(key, val))
	}
	return ctx
}

// WithFields is the multi-field variant of WithField: every entry in f is
// added to the logger and, when recording, set as a span attribute.
func (s *span) WithFields(ctx context.Context, f log.Fields) context.Context {
	s.mu.Lock()
	s.l = s.l.WithFields(f)
	ctx = log.WithLogger(ctx, &logger{s: s.s, l: s.l})
	s.mu.Unlock()
	if s.s.IsRecording() {
		attrs := make([]attribute.KeyValue, 0, len(f))
		for k, v := range f {
			attrs = append(attrs, makeAttribute(k, v))
		}
		s.s.SetAttributes(attrs...)
	}
	return ctx
}

// Logger returns a logger bound to this span; its output is mirrored onto
// the span as events.
// NOTE(review): s.l is read here without holding mu — confirm callers never
// race Logger() against WithField/WithFields on the same span.
func (s *span) Logger() log.Logger {
	return &logger{s: s.s, l: s.l}
}
// logger mirrors log calls onto an openTelemetry span as events while
// delegating the actual logging to the wrapped virtual-kubelet logger.
type logger struct {
	s ot.Span
	l log.Logger
	// a accumulates attributes added via WithField/WithFields.
	// NOTE(review): a is built up but never attached to the events emitted
	// in logEvent/logEventf — confirm whether that is intentional.
	a []attribute.KeyValue
}

// Debug logs at DEBUG level and records the message as a span event.
func (l *logger) Debug(args ...interface{}) {
	l.logEvent(lDebug, args...)
}

// Debugf is the formatted variant of Debug.
func (l *logger) Debugf(f string, args ...interface{}) {
	l.logEventf(lDebug, f, args...)
}

// Info logs at INFO level and records the message as a span event.
func (l *logger) Info(args ...interface{}) {
	l.logEvent(lInfo, args...)
}

// Infof is the formatted variant of Info.
func (l *logger) Infof(f string, args ...interface{}) {
	l.logEventf(lInfo, f, args...)
}

// Warn logs at WARN level and records the message as a span event.
func (l *logger) Warn(args ...interface{}) {
	l.logEvent(lWarn, args...)
}

// Warnf is the formatted variant of Warn.
func (l *logger) Warnf(f string, args ...interface{}) {
	l.logEventf(lWarn, f, args...)
}

// Error logs at ERROR level and records the message as a span event.
func (l *logger) Error(args ...interface{}) {
	l.logEvent(lErr, args...)
}

// Errorf is the formatted variant of Error.
func (l *logger) Errorf(f string, args ...interface{}) {
	l.logEventf(lErr, f, args...)
}

// Fatal logs at FATAL level; the underlying logger is expected to exit the
// process, so the span event is never recorded for this level.
func (l *logger) Fatal(args ...interface{}) {
	l.logEvent(lFatal, args...)
}

// Fatalf is the formatted variant of Fatal.
func (l *logger) Fatalf(f string, args ...interface{}) {
	l.logEventf(lFatal, f, args...)
}
// logEvent logs the Sprint-joined args through the wrapped logger at the
// requested level, then mirrors the same message onto the span as a
// timestamped event while the span is still recording.
// NOTE(review): for lFatal the wrapped logger's Fatal typically terminates
// the process, in which case AddEvent below is never reached — confirm.
func (l *logger) logEvent(lvl logLevel, args ...interface{}) {
	msg := fmt.Sprint(args...)
	switch lvl {
	case lDebug:
		l.l.Debug(msg)
	case lInfo:
		l.l.Info(msg)
	case lWarn:
		l.l.Warn(msg)
	case lErr:
		l.l.Error(msg)
	case lFatal:
		l.l.Fatal(msg)
	}
	if l.s.IsRecording() {
		l.s.AddEvent(msg, ot.WithTimestamp(time.Now()))
	}
}
// logEventf is the printf-style counterpart of logEvent: it forwards the
// format and args to the wrapped logger at the requested level, then records
// the formatted message as a span event while the span is still recording.
func (l *logger) logEventf(lvl logLevel, format string, args ...interface{}) {
	switch lvl {
	case lDebug:
		l.l.Debugf(format, args...)
	case lInfo:
		l.l.Infof(format, args...)
	case lWarn:
		l.l.Warnf(format, args...)
	case lErr:
		l.l.Errorf(format, args...)
	case lFatal:
		l.l.Fatalf(format, args...)
	}
	if l.s.IsRecording() {
		// Only pay for Sprintf when the event will actually be recorded.
		l.s.AddEvent(fmt.Sprintf(format, args...), ot.WithTimestamp(time.Now()))
	}
}
// WithError records err under the conventional "err" field by delegating to
// WithField.
func (l *logger) WithError(err error) log.Logger {
	return l.WithField("err", err)
}
// WithField returns a new logger carrying the extra key/value pair. The
// attribute list is only extended while the span is recording; the receiver
// is never mutated, so derived loggers can be used concurrently.
func (l *logger) WithField(k string, value interface{}) log.Logger {
	var attrs []attribute.KeyValue
	if l.s.IsRecording() {
		attrs = make([]attribute.KeyValue, 0, len(l.a)+1)
		attrs = append(attrs, l.a...)
		attrs = append(attrs, makeAttribute(k, value))
	}
	return &logger{s: l.s, a: attrs, l: l.l.WithField(k, value)}
}
// WithFields is the multi-field variant of WithField: it returns a new
// logger whose attribute list is a copy of the receiver's plus one attribute
// per entry of fields (built only while the span is recording).
func (l *logger) WithFields(fields log.Fields) log.Logger {
	var attrs []attribute.KeyValue
	if l.s.IsRecording() {
		attrs = make([]attribute.KeyValue, 0, len(l.a)+len(fields))
		attrs = append(attrs, l.a...)
		for k, v := range fields {
			attrs = append(attrs, makeAttribute(k, v))
		}
	}
	return &logger{s: l.s, a: attrs, l: l.l.WithFields(fields)}
}
// makeAttribute converts an arbitrary log-field value into an openTelemetry
// attribute, picking the typed constructor where one exists and falling back
// to a "%+v" string for everything else. Note that a nil interface value
// matches no case and lands in the default branch ("<nil>").
func makeAttribute(key string, val interface{}) (attr attribute.KeyValue) {
	switch v := val.(type) {
	case string:
		return attribute.String(key, v)
	// case []string:
	// 	return attribute.StringSlice(key, v)
	case fmt.Stringer:
		return attribute.Stringer(key, v)
	case int:
		return attribute.Int(key, v)
	// case []int:
	// 	return attribute.IntSlice(key, v)
	case int64:
		return attribute.Int64(key, v)
	case float64:
		return attribute.Float64(key, v)
	// case []float64:
	// 	return attribute.Float64Slice(key, v)
	// case []int64:
	// 	return attribute.Int64Slice(key, v)
	case bool:
		return attribute.Bool(key, v)
	// case []bool:
	// 	return attribute.BoolSlice(key, v)
	case error:
		// Bug fix: the original built attribute.String(key, "") here but
		// discarded it (missing return), then called v.Error() — a panic for
		// a typed-nil error. Return the empty attribute instead.
		if v == nil {
			return attribute.String(key, "")
		}
		return attribute.String(key, v.Error())
	default:
		return attribute.String(key, fmt.Sprintf("%+v", val))
	}
}

View File

@@ -0,0 +1,622 @@
// Copyright © 2022 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opentelemetry
import (
"context"
"fmt"
"sort"
"strconv"
"testing"
"time"
"github.com/pkg/errors"
"github.com/virtual-kubelet/virtual-kubelet/log"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
sdktracetest "go.opentelemetry.io/otel/sdk/trace/tracetest"
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"gotest.tools/assert"
"gotest.tools/assert/cmp"
)
// TestStartSpan is a smoke test: starting and ending a span through the
// Adapter must not panic with a real (in-memory) tracer provider installed.
func TestStartSpan(t *testing.T) {
	t.Run("addField", func(t *testing.T) {
		tearDown, p, _ := setupSuite()
		defer tearDown(p)
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		a := Adapter{}
		_, s := a.StartSpan(ctx, "name")
		s.End()
	})
}
// TestSetStatus verifies that span.SetStatus maps a nil error to codes.Ok
// and a non-nil error to codes.Error with the error's message, and that
// calling SetStatus after End is a safe no-op.
func TestSetStatus(t *testing.T) {
	testCases := []struct {
		description         string
		spanName            string
		inputStatus         error
		expectedCode        codes.Code
		expectedDescription string
	}{
		{
			description:         "error status",
			spanName:            "test",
			inputStatus:         errors.New("fake msg"),
			expectedCode:        codes.Error,
			expectedDescription: "fake msg",
		}, {
			description:         "non-error status",
			spanName:            "test",
			inputStatus:         nil,
			expectedCode:        codes.Ok,
			expectedDescription: "",
		},
	}
	for _, tt := range testCases {
		t.Run(tt.description, func(t *testing.T) {
			tearDown, p, e := setupSuite()
			defer tearDown(p)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
			l := log.G(ctx).WithField("method", tt.spanName)
			s := &span{s: ots, l: l}
			s.SetStatus(tt.inputStatus)
			assert.Assert(t, s.s.IsRecording())
			s.End()
			assert.Assert(t, !s.s.IsRecording())
			// NOTE(review): e.status / e.statusMessage are read directly off
			// the exporter returned by setupSuite; confirm that exporter type
			// actually exposes these fields (sdktracetest.InMemoryExporter
			// exposes spans via GetSpans(), not status fields).
			assert.Assert(t, e.status == tt.expectedCode)
			assert.Assert(t, e.statusMessage == tt.expectedDescription)
			s.SetStatus(tt.inputStatus) // should not be panic even if span is ended.
		})
	}
}
// TestWithField verifies that span.WithField records each field as a span
// attribute, with later values overwriting earlier ones for the same key.
func TestWithField(t *testing.T) {
	type field struct {
		key   string
		value interface{}
	}
	testCases := []struct {
		description        string
		spanName           string
		fields             []field
		expectedAttributes []attribute.KeyValue
	}{
		{
			description:        "single field",
			spanName:           "test",
			fields:             []field{{key: "testKey1", value: "value1"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		}, {
			description:        "multiple unique fields",
			spanName:           "test",
			fields:             []field{{key: "testKey1", value: "value1"}, {key: "testKey2", value: "value2"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
		}, {
			description:        "duplicated fields",
			spanName:           "test",
			fields:             []field{{key: "testKey1", value: "value1"}, {key: "testKey1", value: "value2"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value2")}},
		},
	}
	for _, tt := range testCases {
		tt := tt
		t.Run(tt.description, func(t *testing.T) {
			tearDown, p, e := setupSuite()
			defer tearDown(p)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
			l := log.G(ctx).WithField("method", tt.spanName)
			s := &span{s: ots, l: l}
			for _, f := range tt.fields {
				ctx = s.WithField(ctx, f.key, f.value)
			}
			s.End()
			assert.Assert(t, len(e.attributes) == len(tt.expectedAttributes))
			for _, a := range tt.expectedAttributes {
				// Bug fix: cmp.Contains(...) alone only builds a Comparison
				// and never evaluates it; it must be run via assert.Check.
				assert.Check(t, cmp.Contains(e.attributes, a))
			}
		})
	}
}
// TestWithFields verifies that span.WithFields records every entry of a
// log.Fields map as a span attribute.
func TestWithFields(t *testing.T) {
	testCases := []struct {
		description        string
		spanName           string
		fields             log.Fields
		expectedAttributes []attribute.KeyValue
	}{
		{
			description:        "single field",
			spanName:           "test",
			fields:             log.Fields{"testKey1": "value1"},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		}, {
			description:        "multiple unique fields",
			spanName:           "test",
			fields:             log.Fields{"testKey1": "value1", "testKey2": "value2"},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
		},
	}
	for _, tt := range testCases {
		tt := tt
		t.Run(tt.description, func(t *testing.T) {
			tearDown, p, e := setupSuite()
			defer tearDown(p)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
			l := log.G(ctx).WithField("method", tt.spanName)
			s := &span{s: ots, l: l}
			_ = s.WithFields(ctx, tt.fields)
			s.End()
			assert.Assert(t, len(e.attributes) == len(tt.expectedAttributes))
			for _, a := range tt.expectedAttributes {
				// Bug fix: the bare cmp.Contains(...) call was a no-op; the
				// Comparison must be evaluated through assert.Check.
				assert.Check(t, cmp.Contains(e.attributes, a))
			}
		})
	}
}
// TestLog verifies that each leveled log method (Debug/Info/Warn/Error/Fatal)
// forwards fields to the wrapped logger and records the message as a
// timestamped span event.
func TestLog(t *testing.T) {
	testCases := []struct {
		description        string
		spanName           string
		logLevel           logLevel
		fields             log.Fields
		msg                string
		expectedEvents     []sdktrace.Event
		expectedAttributes []attribute.KeyValue
	}{
		{
			description:        "debug",
			spanName:           "test",
			logLevel:           lDebug,
			fields:             log.Fields{"testKey1": "value1"},
			msg:                "message",
			expectedEvents:     []sdktrace.Event{{Name: "message"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		}, {
			description:        "info",
			spanName:           "test",
			logLevel:           lInfo,
			fields:             log.Fields{"testKey1": "value1"},
			msg:                "message",
			expectedEvents:     []sdktrace.Event{{Name: "message"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		}, {
			description:        "warn",
			spanName:           "test",
			logLevel:           lWarn,
			fields:             log.Fields{"testKey1": "value1"},
			msg:                "message",
			expectedEvents:     []sdktrace.Event{{Name: "message"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		}, {
			description:        "error",
			spanName:           "test",
			logLevel:           lErr,
			fields:             log.Fields{"testKey1": "value1"},
			msg:                "message",
			expectedEvents:     []sdktrace.Event{{Name: "message"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		}, {
			description:        "fatal",
			spanName:           "test",
			logLevel:           lFatal,
			fields:             log.Fields{"testKey1": "value1"},
			msg:                "message",
			expectedEvents:     []sdktrace.Event{{Name: "message"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		},
	}
	for _, tt := range testCases {
		tt := tt
		t.Run(tt.description, func(t *testing.T) {
			tearDown, p, e := setupSuite()
			defer tearDown(p)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
			fl := &fakeLogger{}
			l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
			switch tt.logLevel {
			case lDebug:
				l.WithFields(tt.fields).Debug(tt.msg)
			case lInfo:
				l.WithFields(tt.fields).Info(tt.msg)
			case lWarn:
				l.WithFields(tt.fields).Warn(tt.msg)
			case lErr:
				l.WithFields(tt.fields).Error(tt.msg)
			case lFatal:
				l.WithFields(tt.fields).Fatal(tt.msg)
			}
			s.End()
			assert.Assert(t, len(e.events) == len(tt.expectedEvents))
			for i, event := range tt.expectedEvents {
				assert.Assert(t, e.events[i].Name == event.Name)
				assert.Assert(t, !e.events[i].Time.IsZero())
			}
			assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
			for _, a := range tt.expectedAttributes {
				// Bug fix: the bare cmp.Contains(...) call never evaluated
				// the comparison; run it through assert.Check.
				assert.Check(t, cmp.Contains(fl.a, a))
			}
		})
	}
}
// TestLogf verifies the printf-style log methods: the formatted message must
// be recorded as a span event, and WithFields must produce one attribute per
// field with the correct attribute type (values are only type-checked).
func TestLogf(t *testing.T) {
	testCases := []struct {
		description        string
		spanName           string
		logLevel           logLevel
		msg                string
		fields             log.Fields
		args               []interface{}
		expectedEvents     []sdktrace.Event
		expectedAttributes []attribute.KeyValue
	}{
		{
			description: "debug",
			spanName:    "test",
			logLevel:    lDebug,
			msg:         "k1: %s, k2: %v, k3: %d, k4: %v",
			fields:      map[string]interface{}{"k1": "test", "k2": []string{"test"}, "k3": 1, "k4": []int{1}},
			args:        []interface{}{"test", []string{"test"}, int(1), []int{1}},
			expectedEvents: []sdktrace.Event{{Name: "k1: test, k2: [test], k3: 1, k4: [1]"}},
			expectedAttributes: []attribute.KeyValue{
				attribute.String("k1", "test"),
				attribute.String("k2", fmt.Sprintf("%+v", []string{"test"})),
				attribute.Int("k3", 1),
				attribute.String("k4", fmt.Sprintf("%+v", []int{1})),
			},
		}, {
			description: "info",
			spanName:    "test",
			logLevel:    lInfo,
			msg:         "k1: %d, k2: %v, k3: %f, k4: %v",
			fields:      map[string]interface{}{"k1": int64(3), "k2": []int64{4}, "k3": float64(2), "k4": []float64{4}},
			args:        []interface{}{int64(3), []int64{4}, float64(2), []float64{4}},
			expectedEvents: []sdktrace.Event{{Name: "k1: 3, k2: [4], k3: 2.000000, k4: [4]"}},
			// Only Key and Value.Type() are asserted below, so the literal
			// values here do not need to match the fields above.
			expectedAttributes: []attribute.KeyValue{
				attribute.Int64("k1", 1),
				attribute.String("k2", fmt.Sprintf("%+v", []int64{2})),
				attribute.Float64("k3", 3),
				attribute.String("k4", fmt.Sprintf("%+v", []float64{4})),
			},
		}, {
			description: "warn",
			spanName:    "test",
			logLevel:    lWarn,
			msg:         "k1: %v, k2: %v",
			fields:      map[string]interface{}{"k1": map[int]int{1: 1}, "k2": num(1)},
			args:        []interface{}{map[int]int{1: 1}, num(1)},
			expectedEvents: []sdktrace.Event{{Name: "k1: map[1:1], k2: 1"}},
			expectedAttributes: []attribute.KeyValue{
				attribute.String("k1", "{1:1}"),
				attribute.Stringer("k2", num(1)),
			},
		}, {
			description: "error",
			spanName:    "test",
			logLevel:    lErr,
			msg:         "k1: %t, k2: %v, k3: %s",
			fields:      map[string]interface{}{"k1": true, "k2": []bool{true}, "k3": errors.New("fake")},
			args:        []interface{}{true, []bool{true}, errors.New("fake")},
			expectedEvents: []sdktrace.Event{{Name: "k1: true, k2: [true], k3: fake"}},
			expectedAttributes: []attribute.KeyValue{
				attribute.Bool("k1", true),
				attribute.String("k2", fmt.Sprintf("%+v", []bool{true})),
				attribute.String("k3", "fake"),
			},
		}, {
			description:        "fatal",
			spanName:           "test",
			logLevel:           lFatal,
			expectedEvents:     []sdktrace.Event{{Name: ""}},
			expectedAttributes: []attribute.KeyValue{},
		},
	}
	for _, tt := range testCases {
		tt := tt
		t.Run(tt.description, func(t *testing.T) {
			tearDown, p, e := setupSuite()
			defer tearDown(p)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
			fl := &fakeLogger{}
			l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
			switch tt.logLevel {
			case lDebug:
				l.WithFields(tt.fields).Debugf(tt.msg, tt.args...)
			case lInfo:
				l.WithFields(tt.fields).Infof(tt.msg, tt.args...)
			case lWarn:
				l.WithFields(tt.fields).Warnf(tt.msg, tt.args...)
			case lErr:
				l.WithFields(tt.fields).Errorf(tt.msg, tt.args...)
			case lFatal:
				l.WithFields(tt.fields).Fatalf(tt.msg, tt.args...)
			}
			s.End()
			assert.Assert(t, len(e.events) == len(tt.expectedEvents))
			for i, event := range tt.expectedEvents {
				event := event
				i := i
				t.Run(fmt.Sprintf("event %s", event.Name), func(t *testing.T) {
					assert.Check(t, cmp.Equal(e.events[i].Name, event.Name))
					assert.Check(t, !e.events[i].Time.IsZero())
				})
			}
			assert.Assert(t, cmp.Len(fl.a, len(tt.expectedAttributes)))
			sort.Slice(tt.expectedAttributes, func(i, j int) bool {
				return tt.expectedAttributes[i].Key < tt.expectedAttributes[j].Key
			})
			sort.Slice(fl.a, func(i, j int) bool {
				return fl.a[i].Key < fl.a[j].Key
			})
			for i, a := range tt.expectedAttributes {
				a := a
				t.Run(fmt.Sprintf("attribute %s", a.Key), func(t *testing.T) {
					assert.Assert(t, fl.a[i].Key == a.Key)
					assert.Assert(t, cmp.Equal(fl.a[i].Value.Type(), a.Value.Type()))
					// TODO: check value, this is harder to do since the types are unknown
				})
			}
			// Bug fix: tt.args must be spread; passing the slice as a single
			// argument mismatches the format verbs (go vet printf check).
			l.Debugf(tt.msg, tt.args...) // should not panic even if span is finished
		})
	}
}
// TestLogWithField verifies that logger.WithField hands each field to the
// wrapped logger. fakeLogger appends every field it receives and never
// deduplicates, so duplicated keys yield two attributes.
func TestLogWithField(t *testing.T) {
	type field struct {
		key   string
		value interface{}
	}
	testCases := []struct {
		description        string
		spanName           string
		fields             []field
		expectedAttributes []attribute.KeyValue
	}{
		{
			description:        "single field",
			spanName:           "test",
			fields:             []field{{key: "testKey1", value: "value1"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		}, {
			description:        "multiple unique fields",
			spanName:           "test",
			fields:             []field{{key: "testKey1", value: "value1"}, {key: "testKey2", value: "value2"}},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
		}, {
			description: "duplicated fields",
			spanName:    "test",
			fields:      []field{{key: "testKey1", value: "value1"}, {key: "testKey1", value: "value2"}},
			// Bug fix: the old expectation listed a "testKey2" attribute that
			// no input produces; fakeLogger records both duplicated entries.
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey1", Value: attribute.StringValue("value2")}},
		},
	}
	for _, tt := range testCases {
		tt := tt
		t.Run(tt.description, func(t *testing.T) {
			tearDown, p, _ := setupSuite()
			defer tearDown(p)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
			fl := &fakeLogger{}
			l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
			for _, f := range tt.fields {
				l.WithField(f.key, f.value).Info("")
			}
			s.End()
			assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
			for _, a := range tt.expectedAttributes {
				// Bug fix: evaluate the comparison via assert.Check; a bare
				// cmp.Contains(...) call checks nothing.
				assert.Check(t, cmp.Contains(fl.a, a))
			}
			l.Debug("") // should not panic even if span is finished
		})
	}
}
// TestLogWithError verifies that logger.WithError records the error under
// the "err" key. A nil error arrives at makeAttribute as a nil interface,
// which falls through to the default "%+v" branch and stringifies as "<nil>".
func TestLogWithError(t *testing.T) {
	testCases := []struct {
		description        string
		spanName           string
		err                error
		expectedAttributes []attribute.KeyValue
	}{
		{
			description:        "normal",
			spanName:           "test",
			err:                errors.New("fake"),
			expectedAttributes: []attribute.KeyValue{{Key: "err", Value: attribute.StringValue("fake")}},
		}, {
			description: "nil error",
			spanName:    "test",
			err:         nil,
			// Bug fix: a nil interface never matches `case error` in
			// makeAttribute, so the recorded value is "<nil>", not "".
			expectedAttributes: []attribute.KeyValue{{Key: "err", Value: attribute.StringValue("<nil>")}},
		},
	}
	for _, tt := range testCases {
		tt := tt
		t.Run(tt.description, func(t *testing.T) {
			tearDown, p, _ := setupSuite()
			defer tearDown(p)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
			fl := &fakeLogger{}
			l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
			l.WithError(tt.err).Error("")
			s.End()
			assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
			for _, a := range tt.expectedAttributes {
				// Bug fix: run the comparison via assert.Check; the bare
				// cmp.Contains(...) call was a no-op.
				assert.Check(t, cmp.Contains(fl.a, a))
			}
		})
	}
}
// TestLogWithFields verifies that logger.WithFields hands every field of the
// map to the wrapped logger.
func TestLogWithFields(t *testing.T) {
	testCases := []struct {
		description        string
		spanName           string
		fields             log.Fields
		expectedAttributes []attribute.KeyValue
	}{
		{
			description:        "single field",
			spanName:           "test",
			fields:             log.Fields{"testKey1": "value1"},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
		}, {
			description:        "multiple unique fields",
			spanName:           "test",
			fields:             log.Fields{"testKey1": "value1", "testKey2": "value2"},
			expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
		},
	}
	for _, tt := range testCases {
		tt := tt
		t.Run(tt.description, func(t *testing.T) {
			tearDown, p, _ := setupSuite()
			defer tearDown(p)
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
			fl := &fakeLogger{}
			l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
			l.WithFields(tt.fields).Debug("")
			s.End()
			assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
			for _, a := range tt.expectedAttributes {
				// Bug fix: the bare cmp.Contains(...) call never ran the
				// comparison; evaluate it with assert.Check.
				assert.Check(t, cmp.Contains(fl.a, a))
			}
		})
	}
}
// setupSuite installs a fresh tracer provider backed by an in-memory,
// synchronous exporter and registers it globally. It returns a teardown
// function (call it with the returned provider), the provider, and the
// exporter for inspection.
func setupSuite() (func(provider *sdktrace.TracerProvider), *sdktrace.TracerProvider, *sdktracetest.InMemoryExporter) {
	r := NewResource("virtual-kubelet", "1.2.3")
	e := sdktracetest.NewInMemoryExporter()
	p := sdktrace.NewTracerProvider(
		sdktrace.WithSyncer(e),
		sdktrace.WithResource(r),
		sdktrace.WithSampler(sdktrace.AlwaysSample()),
	)
	otel.SetTracerProvider(p)
	// Bug fix: shut down the provider the caller passes in; the original
	// ignored its parameter and always shut down the captured p, making the
	// parameter misleading.
	return func(provider *sdktrace.TracerProvider) {
		_ = provider.Shutdown(context.Background())
	}, p, e
}
// NewResource builds an openTelemetry resource carrying the service name and
// version, tagged with the semconv schema URL.
func NewResource(name, version string) *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String(name),
		semconv.ServiceVersionKey.String(version),
	)
}
// fakeLogger is a log.Logger stub that discards log output and records every
// field handed to it (via WithField/WithFields/WithError) as an attribute,
// so tests can assert what the logger wrapper forwarded. Unlike the real
// logger it returns itself from the With* methods, accumulating fields.
type fakeLogger struct {
	a []attribute.KeyValue // every field received, converted by makeAttribute
}

func (*fakeLogger) Debug(...interface{})          {}
func (*fakeLogger) Debugf(string, ...interface{}) {}
func (*fakeLogger) Info(...interface{})           {}
func (*fakeLogger) Infof(string, ...interface{})  {}
func (*fakeLogger) Warn(...interface{})           {}
func (*fakeLogger) Warnf(string, ...interface{})  {}
func (*fakeLogger) Error(...interface{})          {}
func (*fakeLogger) Errorf(string, ...interface{}) {}
func (*fakeLogger) Fatal(...interface{})          {}
func (*fakeLogger) Fatalf(string, ...interface{}) {}

// WithField records the field and returns the same logger (not a copy).
func (l *fakeLogger) WithField(k string, v interface{}) log.Logger {
	l.a = append(l.a, makeAttribute(k, v))
	return l
}

// WithFields records every entry of fs and returns the same logger.
func (l *fakeLogger) WithFields(fs log.Fields) log.Logger {
	for k, v := range fs {
		l.a = append(l.a, makeAttribute(k, v))
	}
	return l
}

// WithError records err under the "err" key and returns the same logger.
func (l *fakeLogger) WithError(err error) log.Logger {
	l.a = append(l.a, makeAttribute("err", err))
	return l
}
// num is a test helper implementing fmt.Stringer, used to exercise
// makeAttribute's Stringer branch.
type num int

func (i num) String() string {
	return strconv.Itoa(int(i))
}