Compare commits

..

77 Commits

Author SHA1 Message Date
fnuarnav
2c155accb7 Prometheus metrics are encoded as text, not JSON (#1101)
Co-authored-by: Sanchit Mehta <sanchit.mehta602@gmail.com>
2023-04-06 08:03:43 +01:00
Salvatore Cirone
9c32bfb0ae Add support for Attach API functionality (#1090)
Co-authored-by: Pablo Borrelli <pablo.borrelli0@gmail.com>
2023-03-31 08:51:50 -07:00
Jackie Lan
b7030b9dc5 Support advanced capacity and providerID settings in mock provider 2023-03-28 13:22:44 +01:00
fnuarnav
a457d445a3 feat: Implement new metrics endpoint for k8s 1.24+ (#1082) 2023-03-28 13:01:37 +01:00
fnuarnav
b70ee9b6dd New metrics API proposal (#1075) 2023-03-28 12:39:42 +01:00
dependabot[bot]
8bf7691f59 Bump github.com/prometheus/client_golang from 1.13.0 to 1.14.0 (#1095)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-27 11:10:35 -07:00
dependabot[bot]
d87cc6ee1a Bump k8s.io/klog/v2 from 2.80.1 to 2.90.1 (#1091)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Heba Elayoty <31887807+helayoty@users.noreply.github.com>
2023-03-20 23:45:06 +00:00
dependabot[bot]
2b6bd337cc Bump actions/setup-go from 3 to 4 (#1093)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-03-20 16:40:03 -07:00
Heba Elayoty
a2070739bb fix: Fix missing Backoff property for WebHookAuth (#1089) 2023-03-16 02:24:23 +00:00
dependabot[bot]
a90f71b9a4 Bump golang.org/x/time from 0.0.0-20220609170525-579cf78fd858 to 0.3.0
Bumps [golang.org/x/time](https://github.com/golang/time) from 0.0.0-20220609170525-579cf78fd858 to 0.3.0.
- [Release notes](https://github.com/golang/time/releases)
- [Commits](https://github.com/golang/time/commits/v0.3.0)

---
updated-dependencies:
- dependency-name: golang.org/x/time
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-14 10:52:11 +00:00
Pires
dcbb102f53 cmd: fix nil pointer during node setup 2023-03-14 10:44:31 +00:00
dependabot[bot]
90f81e9cc7 Bump k8s.io/api from 0.25.0 to 0.26.2 (#1078)
Co-authored-by: Pires <pjpires@gmail.com>
2023-03-13 11:10:19 +00:00
Pires
eb5d959215 replace deprecated pointer funcs 2023-03-13 10:56:38 +00:00
dependabot[bot]
109b1eed8b Bump k8s.io/apimachinery from 0.25.0 to 0.26.2
Bumps [k8s.io/apimachinery](https://github.com/kubernetes/apimachinery) from 0.25.0 to 0.26.2.
- [Release notes](https://github.com/kubernetes/apimachinery/releases)
- [Commits](https://github.com/kubernetes/apimachinery/compare/v0.25.0...v0.26.2)

---
updated-dependencies:
- dependency-name: k8s.io/apimachinery
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-13 10:56:38 +00:00
Xiang Li
5e4340a4a4 Add vendor directory to gitignore (#957)
Co-authored-by: Heba Elayoty <31887807+helayoty@users.noreply.github.com>
2023-02-28 23:06:56 -08:00
Heba Elayoty
b8f8449177 Update google calendar and meeting time 2023-02-28 22:31:48 -08:00
dependabot[bot]
d23c36eec6 Bump golang.org/x/net from 0.0.0-20220722155237-a158d28d115b to 0.7.0 (#1077)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-02-28 21:58:33 -08:00
Brian Goff
7bcacb1cab Merge pull request #1060 from edigaryev/patch-1
README.md: add go.dev badge
2022-12-15 15:15:03 -08:00
Nikolay Edigaryev
a610358f56 README.md: add go.dev badge 2022-12-09 22:34:17 +04:00
dependabot[bot]
704b01eac6 Bump sigs.k8s.io/controller-runtime from 0.7.1 to 0.13.0 (#1034)
Bumps [sigs.k8s.io/controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) from 0.7.1 to 0.13.0.
- [Release notes](https://github.com/kubernetes-sigs/controller-runtime/releases)
- [Changelog](https://github.com/kubernetes-sigs/controller-runtime/blob/master/RELEASE.md)
- [Commits](https://github.com/kubernetes-sigs/controller-runtime/compare/v0.7.1...v0.13.0)

---
updated-dependencies:
- dependency-name: sigs.k8s.io/controller-runtime
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2022-10-14 13:56:42 -07:00
Heba Elayoty
94999fc0b6 Merge pull request #1035 from virtual-kubelet/dependabot/go_modules/k8s.io/klog/v2-2.80.1
Bump k8s.io/klog/v2 from 2.2.0 to 2.80.1
2022-10-14 13:32:39 -07:00
dependabot[bot]
9d8005e4b8 Bump k8s.io/klog/v2 from 2.2.0 to 2.80.1
Bumps [k8s.io/klog/v2](https://github.com/kubernetes/klog) from 2.2.0 to 2.80.1.
- [Release notes](https://github.com/kubernetes/klog/releases)
- [Changelog](https://github.com/kubernetes/klog/blob/main/RELEASE.md)
- [Commits](https://github.com/kubernetes/klog/compare/v2.2.0...v2.80.1)

---
updated-dependencies:
- dependency-name: k8s.io/klog/v2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-10-10 17:37:07 +00:00
Brian Goff
a486eaffd2 Merge pull request #1046 from cpuguy83/codeql_perms
codeql: Add explicit permissions
2022-10-10 10:36:08 -07:00
Brian Goff
d66366ba96 codeql: Add explicit permissions
Codeql requires write access to security-events, but our default action
token (rightly) only has read permissions.
This adds the explicit request for write access.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-10 17:29:32 +00:00
Brian Goff
bb4e20435d Merge pull request #1042 from cpuguy83/bumps
Bump packages
2022-10-08 10:51:33 -07:00
Brian Goff
6feafcf018 Remove klogv2 alias
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
5db1443e33 Fix apparent bad copy/pasta in test causing panic
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
2c4442b17f Fix linting issues and update make lint target.
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
70848cfdae Bump k8s deps to v0.24
This requires dropping otel down to v0.20 because the apiserver package
is importing it and some packages moved around with otel v1.
Even k8s v0.25 still uses this old version of otel, so we are stuck for
a bit (v0.26 will, as of now, use a newer otel version).

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
c668ae6ab6 Bump problematic deps
Changes in klog and logr have made automatic bumps from dependabot
problematic.
We also shouldn't need klogv1 so removed that.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
f7f8b45117 Bump go version to 1.18 in dockerfile
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-10-07 23:21:47 +00:00
Brian Goff
5001135763 Merge pull request #1043 from LuBingtan/add-default-client
Add default client in mock provider
2022-09-30 09:08:50 -07:00
lubingtan
67be3c681d Add default client
Signed-off-by: lubingtan <bingtlu@ebay.com>
2022-09-30 09:47:22 +08:00
Brian Goff
fca742986c Merge pull request #1036 from virtual-kubelet/dependabot/github_actions/actions/checkout-3
Bump actions/checkout from 2 to 3
2022-09-12 11:07:19 -07:00
dependabot[bot]
db7f53c1ca Bump actions/checkout from 2 to 3
Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-09-12 17:33:34 +00:00
Brian Goff
c63b8f0dec Merge pull request #994 from pigletfly/bump-go
bump golang to 1.17
2022-09-12 09:38:09 -07:00
pigletfly
83fbc0c687 bump golang to 1.17
Signed-off-by: pigletfly <wangbing.adam@gmail.com>
2022-09-07 09:34:00 +08:00
Brian Goff
d2523fe808 Merge pull request #1031 from cpuguy83/fix_envtest_name
Fix typo in job name
2022-08-31 14:03:00 -07:00
Brian Goff
7ee822ec6d Fix typo in job name 2022-08-31 21:02:12 +00:00
Brian Goff
0b70fb1958 Merge pull request #1025 from virtual-kubelet/dependabot/go_modules/contrib.go.opencensus.io/exporter/jaeger-0.2.1
Bump contrib.go.opencensus.io/exporter/jaeger from 0.1.0 to 0.2.1
2022-08-31 13:15:00 -07:00
dependabot[bot]
a25c1def45 Bump contrib.go.opencensus.io/exporter/jaeger from 0.1.0 to 0.2.1
Bumps [contrib.go.opencensus.io/exporter/jaeger](https://github.com/census-ecosystem/opencensus-go-exporter-jaeger) from 0.1.0 to 0.2.1.
- [Release notes](https://github.com/census-ecosystem/opencensus-go-exporter-jaeger/releases)
- [Commits](https://github.com/census-ecosystem/opencensus-go-exporter-jaeger/compare/v0.1.0...v0.2.1)

---
updated-dependencies:
- dependency-name: contrib.go.opencensus.io/exporter/jaeger
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:58:59 +00:00
Brian Goff
d682bb3894 Merge pull request #1028 from cpuguy83/codeql_nopr
Only run codeql on pushes to master, not pr's
2022-08-31 12:57:42 -07:00
Brian Goff
6198b02423 Only run codeql on pushes to master, not pr's
These are extremely slow and probably very expensive for someone.
We don't need these running on PR's which have constant pushes, rebases,
etc.

The activity on the repo is slow enough we can fix-up things after
codeql runs on master.

Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 19:56:04 +00:00
Brian Goff
9d94eea9e9 Merge pull request #1023 from virtual-kubelet/dependabot/github_actions/github/codeql-action-2
Bump github/codeql-action from 1 to 2
2022-08-31 12:48:19 -07:00
dependabot[bot]
de4fe42586 Bump github/codeql-action from 1 to 2
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 1 to 2.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/github/codeql-action/compare/v1...v2)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:42:26 +00:00
Brian Goff
305e33bfbf Merge pull request #1022 from virtual-kubelet/dependabot/github_actions/actions/setup-go-3
Bump actions/setup-go from 2 to 3
2022-08-31 12:41:50 -07:00
dependabot[bot]
00d8340a64 Bump actions/setup-go from 2 to 3
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 2 to 3.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v2...v3)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:36:55 +00:00
Brian Goff
5e5a842dbb Merge pull request #1021 from cpuguy83/dependabot_actions
dependabot: update github actions
2022-08-31 12:36:37 -07:00
Brian Goff
aa94284712 dependabot: update github actions
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 19:35:43 +00:00
Brian Goff
d87dd1c79f Merge pull request #1018 from virtual-kubelet/dependabot/go_modules/contrib.go.opencensus.io/exporter/ocagent-0.7.0
Bump contrib.go.opencensus.io/exporter/ocagent from 0.4.12 to 0.7.0
2022-08-31 12:31:16 -07:00
dependabot[bot]
ab3615b8d7 Bump contrib.go.opencensus.io/exporter/ocagent from 0.4.12 to 0.7.0
Bumps [contrib.go.opencensus.io/exporter/ocagent](https://github.com/census-ecosystem/opencensus-go-exporter-ocagent) from 0.4.12 to 0.7.0.
- [Release notes](https://github.com/census-ecosystem/opencensus-go-exporter-ocagent/releases)
- [Commits](https://github.com/census-ecosystem/opencensus-go-exporter-ocagent/compare/v0.4.12...v0.7.0)

---
updated-dependencies:
- dependency-name: contrib.go.opencensus.io/exporter/ocagent
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:26:33 +00:00
Brian Goff
22d2416dc4 Merge pull request #1020 from virtual-kubelet/dependabot/go_modules/github.com/prometheus/client_golang-1.13.0
Bump github.com/prometheus/client_golang from 1.7.1 to 1.13.0
2022-08-31 12:25:37 -07:00
dependabot[bot]
e1c6e80a7a Bump github.com/prometheus/client_golang from 1.7.1 to 1.13.0
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.7.1 to 1.13.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.7.1...v1.13.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 19:20:54 +00:00
Brian Goff
1ed3180ec2 Merge pull request #1019 from virtual-kubelet/dependabot/go_modules/github.com/spf13/cobra-1.5.0
Bump github.com/spf13/cobra from 1.0.0 to 1.5.0
2022-08-31 12:19:55 -07:00
dependabot[bot]
97452b493f Bump github.com/spf13/cobra from 1.0.0 to 1.5.0
Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.0.0 to 1.5.0.
- [Release notes](https://github.com/spf13/cobra/releases)
- [Commits](https://github.com/spf13/cobra/compare/v1.0.0...v1.5.0)

---
updated-dependencies:
- dependency-name: github.com/spf13/cobra
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-31 18:40:59 +00:00
Brian Goff
6363360781 Merge pull request #1004 from mxplusb/feature/dependabot
added basic dependabot integration.
2022-08-31 11:40:07 -07:00
Brian Goff
44d0df547d Merge branch 'master' into feature/dependabot 2022-08-31 11:30:14 -07:00
Brian Goff
10a7559b83 Merge pull request #1006 from yabuchan/opentelemetry
Add opentelemetry as tracing provider
2022-08-31 11:29:56 -07:00
Brian Goff
b98ba29b52 Merge branch 'master' into opentelemetry 2022-08-31 11:09:42 -07:00
Brian Goff
008fe17b91 Merge pull request #1015 from cpuguy83/gh_actions
Add github actions
2022-08-31 11:00:53 -07:00
Brian Goff
ec1fe2070a Merge pull request #1013 from solarkennedy/k8s_1.24_compat
Removed deprecated node Clustername
2022-08-30 22:37:43 -07:00
Brian Goff
48e29d75fc Remove circleci
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 00:58:56 +00:00
Brian Goff
f617ccebc5 Fixup some new lint issues
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 00:58:56 +00:00
Brian Goff
433e0bbd20 Add github actions
Signed-off-by: Brian Goff <cpuguy83@gmail.com>
2022-08-31 00:58:51 +00:00
Kyle Anderson
a8f253088c Removed deprecated node Clustername
With this one line, vk fails to build against k8s 1.24 libs.

The comment says:

    // Deprecated: ClusterName is a legacy field that was always cleared by
    // the system and never used; it will be removed completely in 1.25.

Seems to be removed in 1.24 though.
2022-08-25 19:39:11 -07:00
yabuchan
38e662129d remove warning from golinter 2022-07-08 10:23:17 +09:00
yabuchan
95bdbdec0d reflect review comments 2022-07-08 00:18:58 +09:00
yabuchan
1958686b4a remove unnecessary lines 2022-07-04 21:38:38 +09:00
yabuchan
36397f80c2 downgrade opentelemetry to avoid the library conflicts 2022-07-04 19:15:45 +09:00
yabuchan
801b44543c do not save attributes in logger, add fake logger in unit test 2022-07-04 13:52:51 +09:00
yabuchan
853f9ead1c add sampler in test 2022-07-04 00:22:57 +09:00
yabuchan
915445205f add logic for otel 2022-07-03 23:30:10 +09:00
Sienna Lloyd
2b7e4c9dc6 added basic dependabot integration.
Signed-off-by: Sienna Lloyd <sienna.lloyd@hey.com>
2022-06-23 11:30:16 -06:00
Brian Goff
410e05878a Merge pull request #985 from cbsfly/fix-klog
Change klog flag version to v2
2021-11-23 07:28:04 -08:00
chenbingshen.invoker
70c7745444 Change klog flag version to v2 2021-11-18 18:29:26 +08:00
Brian Goff
269ef14a7a Merge pull request #984 from pigletfly/fix-readme
Add scrape pod metrics
2021-11-16 13:54:57 -08:00
pigletfly
faaf14c68d Add scrape pod metrics 2021-11-12 10:56:46 +08:00
50 changed files with 2944 additions and 720 deletions


@@ -1,167 +0,0 @@
version: 2.0
jobs:
validate:
resource_class: xlarge
docker:
- image: circleci/golang:1.15
environment:
GO111MODULE: "on"
GOPROXY: https://proxy.golang.org
working_directory: /go/src/github.com/virtual-kubelet/virtual-kubelet
steps:
- checkout
- restore_cache:
keys:
- validate-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
- run:
name: go vet
command: V=1 CI=1 make vet
- run:
name: Lint
command: make lint
- run:
name: Dependencies
command: scripts/validate/gomod.sh
- save_cache:
key: validate-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
test:
resource_class: xlarge
docker:
- image: circleci/golang:1.16
environment:
GO111MODULE: "on"
working_directory: /go/src/github.com/virtual-kubelet/virtual-kubelet
steps:
- checkout
- restore_cache:
keys:
- test-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
- run:
name: Build
command: V=1 make build
- run: go install gotest.tools/gotestsum@latest
- run:
name: Tests
environment:
GOTEST: gotestsum -- -timeout=9m
GOTESTSUM_JUNITFILE: output/unit/results.xml
GODEBUG: cgocheck=2
command: |
mkdir -p output/unit
V=1 make test envtest
- save_cache:
key: test-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"
- store_test_results:
path: output
e2e:
machine:
image: ubuntu-1604:202010-01
working_directory: /home/circleci/go/src/github.com/virtual-kubelet/virtual-kubelet
environment:
CHANGE_MINIKUBE_NONE_USER: true
GOPATH: /home/circleci/go
KUBECONFIG: /home/circleci/.kube/config
KUBERNETES_VERSION: v1.20.1
MINIKUBE_HOME: /home/circleci
MINIKUBE_VERSION: v1.16.0
MINIKUBE_WANTUPDATENOTIFICATION: false
MINIKUBE_WANTREPORTERRORPROMPT: false
SKAFFOLD_VERSION: v1.17.2
GO111MODULE: "on"
steps:
- checkout
- run:
name: Install kubectl
command: |
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/${KUBERNETES_VERSION}/bin/linux/amd64/kubectl
chmod +x kubectl
sudo mv kubectl /usr/local/bin/
mkdir -p ${HOME}/.kube
touch ${HOME}/.kube/config
- run:
name: Install Skaffold
command: |
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/${SKAFFOLD_VERSION}/skaffold-linux-amd64
chmod +x skaffold
sudo mv skaffold /usr/local/bin/
- run:
name: Install Minikube dependencies
command: |
sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet # systemd unit is disabled
- run:
name: Install Minikube
command: |
curl -Lo minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
chmod +x minikube
sudo mv minikube /usr/local/bin/
- run:
name: Start Minikube
command: |
sudo -E minikube start --vm-driver=none --cpus 2 --memory 2048 --kubernetes-version=${KUBERNETES_VERSION}
- run:
name: Wait for Minikube
command: |
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}';
until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
sleep 1;
done
- run:
name: Watch pods
command: kubectl get pods -o json --watch
background: true
- run:
name: Watch nodes
command: kubectl get nodes -o json --watch
background: true
- restore_cache:
keys:
- e2e-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}-2
- run:
name: Run the end-to-end test suite
environment:
GOTEST: gotestsum --
command: |
mkdir $HOME/.go
export PATH=$HOME/.go/bin:${PATH}
curl -fsSL -o "/tmp/go.tar.gz" "https://dl.google.com/go/go1.16.4.linux-amd64.tar.gz"
tar -C $HOME/.go --strip-components=1 -xzf "/tmp/go.tar.gz"
go version
mkdir -p output/e2e
export GOTESTSUM_JUNITFILE="$(pwd)/output/e2e/results.xml"
export PATH="${GOPATH}/bin:${PATH}"
go install gotest.tools/gotestsum@latest
make e2e
- store_test_results:
path: output
- save_cache:
key: e2e-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}-2
paths:
- "/home/circleci/go/pkg/mod"
- run:
name: Collect logs on failure from vkubelet-mock-0
command: |
kubectl logs vkubelet-mock-0
when: on_fail
workflows:
version: 2
validate_and_test:
jobs:
- validate
- test
- e2e:
requires:
- validate
- test


@@ -1,4 +1,6 @@
.vscode
private.env
*.private.*
providers/azurebatch/deployment/
providers/azurebatch/deployment/
Dockerfile
.dockerignore

.github/dependabot.yml vendored Normal file

@@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"

.github/workflows/ci.yml vendored Normal file

@@ -0,0 +1,110 @@
name: CI
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches: [master]
pull_request:
env:
GO_VERSION: "1.18"
jobs:
lint:
name: Lint
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v3
- uses: golangci/golangci-lint-action@v3
with:
version: v1.48.0
args: --timeout=5m
unit-tests:
name: Unit Tests
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v3
- name: Run Tests
run: make test
env-tests:
name: Envtest Tests
runs-on: ubuntu-latest
timeout-minutes: 10
steps:
- name: Checkout repository
uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- uses: actions/checkout@v3
- name: Run Tests
run: make envtest
e2e:
name: E2E
runs-on: ubuntu-22.04
timeout-minutes: 10
env:
CHANGE_MINIKUBE_NONE_USER: true
KUBERNETES_VERSION: v1.20.1
MINIKUBE_HOME: /home/circleci
MINIKUBE_VERSION: v1.16.0
MINIKUBE_WANTUPDATENOTIFICATION: false
MINIKUBE_WANTREPORTERRORPROMPT: false
SKAFFOLD_VERSION: v1.17.2
GO111MODULE: "on"
steps:
- uses: actions/setup-go@v4
with:
go-version: ${{ env.GO_VERSION }}
- name: Checkout repository
uses: actions/checkout@v3
- name: Install Skaffold
run: |
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/${SKAFFOLD_VERSION}/skaffold-linux-amd64
chmod +x skaffold
sudo mv skaffold /usr/local/bin/
echo /usr/local/bin >> $GITHUB_PATH
- name: Install Minikube dependencies
run: |
sudo apt-get update && sudo apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet # systemd unit is disabled
- name: Install Minikube
run: |
curl -Lo minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
chmod +x minikube
sudo mv minikube /usr/local/bin/
- name: Start Minikube
run: |
sudo -E PATH=$PATH minikube start --vm-driver=none --cpus 2 --memory 2048 --kubernetes-version=${KUBERNETES_VERSION}
- name: Wait for Minikube
run: |
JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}';
until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do
sleep 1;
done
- name: Run Tests
run: make e2e


@@ -1,56 +1,59 @@
name: "CodeQL"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
on:
push:
branches: [ master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ master ]
branches: [master]
schedule:
- cron: '19 18 * * 3'
- cron: "19 18 * * 3"
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
language: ["go"]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
# Learn more:
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
steps:
- name: Checkout repository
uses: actions/checkout@v2
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# queries: ./path/to/local/query, your-org/your-repo/queries@main
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v1
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
# and modify them (or add more) to build your code if your project
# uses a compiled language
#- run: |
# make bootstrap
# make release
#- run: |
# make bootstrap
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

.gitignore vendored

@@ -41,3 +41,5 @@ loganalytics.json
**/terraform-provider-kubernetes
**/*.tfstate*
debug
vendor/


@@ -2,6 +2,8 @@ linter-settings:
lll:
line-length: 200
timeout: 10m
run:
skip-dirs:
# This directory contains copy code from upstream kubernetes/kubernetes, skip it.
@@ -12,20 +14,26 @@ run:
linters:
enable:
- errcheck
- govet
- ineffassign
- golint
- goconst
- goimports
- unused
- structcheck
- varcheck
- deadcode
- staticcheck
- unconvert
- gofmt
- goimports
- ineffassign
- vet
- unused
- misspell
- nolintlint
- gocritic
- gosec
- exportloopref # Checks for pointers to enclosing loop variables
- tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17
linters-settings:
gosec:
excludes:
- G304
issues:
exclude-use-default: false
exclude:
# EXC0001 errcheck: Almost all programs ignore errors on these functions and in most cases it's ok
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). (is not checked|Errors unhandled)


@@ -1,4 +1,6 @@
FROM golang:1.15 as builder
ARG GOLANG_CI_LINT_VERSION
FROM golang:1.18 as builder
ENV PATH /go/bin:/usr/local/go/bin:$PATH
ENV GOPATH /go
COPY . /go/src/github.com/virtual-kubelet/virtual-kubelet
@@ -7,6 +9,22 @@ ARG BUILD_TAGS=""
RUN make VK_BUILD_TAGS="${BUILD_TAGS}" build
RUN cp bin/virtual-kubelet /usr/bin/virtual-kubelet
FROM golangci/golangci-lint:${GOLANG_CI_LINT_VERSION} as lint
WORKDIR /app
COPY go.mod ./
COPY go.sum ./
RUN \
--mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
go mod download
COPY . .
ARG OUT_FORMAT
RUN \
--mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg/mod \
--mount=type=cache,target=/root/.cache/golangci-lint \
golangci-lint run -v --out-format="${OUT_FORMAT:-colored-line-number}"
FROM scratch
COPY --from=builder /usr/bin/virtual-kubelet /usr/bin/virtual-kubelet
COPY --from=builder /etc/ssl/certs/ /etc/ssl/certs


@@ -183,6 +183,10 @@ envtest: kubebuilder_2.3.1_${TEST_OS}_${TEST_ARCH}
fmt:
goimports -w $(shell go list -f '{{.Dir}}' ./...)
export GOLANG_CI_LINT_VERSION ?= v1.48.0
DOCKER_BUILD ?= docker buildx build
.PHONY: lint
lint: $(gobin_tool)
gobin -run github.com/golangci/golangci-lint/cmd/golangci-lint@v1.33.0 run ./...
lint:
$(DOCKER_BUILD) --target=lint --build-arg GOLANG_CI_LINT_VERSION --build-arg OUT_FORMAT .


@@ -1,5 +1,7 @@
# Virtual Kubelet
[![Go Reference](https://pkg.go.dev/badge/github.com/virtual-kubelet/virtual-kubelet.svg)](https://pkg.go.dev/github.com/virtual-kubelet/virtual-kubelet)
Virtual Kubelet is an open source [Kubernetes kubelet](https://kubernetes.io/docs/reference/generated/kubelet/)
implementation that masquerades as a kubelet for the purposes of connecting Kubernetes to other APIs.
This allows the nodes to be backed by other services like ACI, AWS Fargate, [IoT Edge](https://github.com/Azure/iot-edge-virtual-kubelet-provider), [Tensile Kube](https://github.com/virtual-kubelet/tensile-kube) etc. The primary scenario for VK is enabling the extension of the Kubernetes API into serverless container platforms like ACI and Fargate, though we are open to others. However, it should be noted that VK is explicitly not intended to be an alternative to Kubernetes federation.
@@ -271,6 +273,11 @@ One of the roles of a Kubelet is to accept requests from the API server for
things like `kubectl logs` and `kubectl exec`. Helpers for setting this up are
provided [here](https://godoc.org/github.com/virtual-kubelet/virtual-kubelet/node/api)
#### Scrape Pod metrics
If you want to use HPA (Horizontal Pod Autoscaler) in your cluster, the provider should implement the `GetStatsSummary` function. Then metrics-server will be able to collect the metrics of the pods on virtual-kubelet. Otherwise, you may see `No metrics for pod ` in metrics-server, which means the metrics of the pods on virtual-kubelet are not being collected.
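A minimal sketch of what a provider-side `GetStatsSummary` implementation might look like (the `MyProvider` type and the values reported are illustrative; the stats types are assumed to come from `node/api/statsv1alpha1`):

```go
package myprovider // hypothetical provider package, not part of this repo

import (
	"context"

	stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
)

// MyProvider is an illustrative provider type.
type MyProvider struct {
	nodeName string
}

// GetStatsSummary reports node and per-pod resource usage so that
// metrics-server (and therefore HPA) has data to collect.
func (p *MyProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, error) {
	return &stats.Summary{
		Node: stats.NodeStats{NodeName: p.nodeName},
		Pods: []stats.PodStats{}, // fill in CPU/memory usage for each pod the provider runs
	}, nil
}
```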
## Testing
### Unit tests
@@ -300,9 +307,8 @@ Enable the ServiceNodeExclusion flag, by modifying the Controller Manager manife
Virtual Kubelet follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
Sign the [CNCF CLA](https://github.com/kubernetes/community/blob/master/CLA.md) to be able to make Pull Requests to this repo.
Monthly Virtual Kubelet Office Hours are held at 10am PST on the last Thursday of every month in this [zoom meeting room](https://zoom.us/j/94701509915). Check out the calendar [here](https://calendar.google.com/calendar?cid=bjRtbGMxYWNtNXR0NXQ1a2hqZmRkNTRncGNAZ3JvdXAuY2FsZW5kYXIuZ29vZ2xlLmNvbQ).
Monthly Virtual Kubelet Office Hours are held at 10am PST on the second Thursday of every month in this [zoom meeting room](https://zoom.us/j/94701509915). Check out the calendar [here](https://calendar.google.com/calendar/embed?src=b119ced62134053de07d6c261b50d21ebde0da54f4163f5771b60ecf906e8b90%40group.calendar.google.com&ctz=America%2FLos_Angeles).
Our google drive with design specifications and meeting notes are [here](https://drive.google.com/drive/folders/19Ndu11WBCCBDowo9CrrGUHoIfd2L8Ueg?usp=sharing).
We also have a community slack channel named virtual-kubelet in the Kubernetes slack. You can also connect with the Virtual Kubelet community via the [mailing list](https://lists.cncf.io/g/virtualkubelet-dev).


@@ -22,7 +22,7 @@ import (
"github.com/pkg/errors"
"github.com/spf13/pflag"
"k8s.io/klog"
klog "k8s.io/klog/v2"
)
type mapVar map[string]string
@@ -61,8 +61,10 @@ func installFlags(flags *pflag.FlagSet, c *Opts) {
flags.StringVar(&c.KubeConfigPath, "kubeconfig", c.KubeConfigPath, "kube config file to use for connecting to the Kubernetes API server")
flags.StringVar(&c.KubeNamespace, "namespace", c.KubeNamespace, "kubernetes namespace (default is 'all')")
/* #nosec */
flags.MarkDeprecated("namespace", "Nodes must watch for pods in all namespaces. This option is now ignored.") //nolint:errcheck
flags.MarkHidden("namespace") //nolint:errcheck
/* #nosec */
flags.MarkHidden("namespace") //nolint:errcheck
flags.StringVar(&c.KubeClusterDomain, "cluster-domain", c.KubeClusterDomain, "kubernetes cluster-domain (default is 'cluster.local')")
flags.StringVar(&c.NodeName, "nodename", c.NodeName, "kubernetes node name")
@@ -74,13 +76,16 @@ func installFlags(flags *pflag.FlagSet, c *Opts) {
flags.StringVar(&c.TaintKey, "taint", c.TaintKey, "Set node taint key")
flags.BoolVar(&c.DisableTaint, "disable-taint", c.DisableTaint, "disable the virtual-kubelet node taint")
/* #nosec */
flags.MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable") //nolint:errcheck
flags.IntVar(&c.PodSyncWorkers, "pod-sync-workers", c.PodSyncWorkers, `set the number of pod synchronization workers`)
flags.BoolVar(&c.EnableNodeLease, "enable-node-lease", c.EnableNodeLease, `use node leases (1.13) for node heartbeats`)
/* #nosec */
flags.MarkDeprecated("enable-node-lease", "leases are always enabled") //nolint:errcheck
flags.MarkHidden("enable-node-lease") //nolint:errcheck
/* #nosec */
flags.MarkHidden("enable-node-lease") //nolint:errcheck
flags.StringSliceVar(&c.TraceExporters, "trace-exporter", c.TraceExporters, fmt.Sprintf("sets the tracing exporter to use, available exporters: %s", AvailableTraceExporters()))
flags.StringVar(&c.TraceConfig.ServiceName, "trace-service-name", c.TraceConfig.ServiceName, "sets the name of the service used to register with the trace exporter")


@@ -36,7 +36,7 @@ func getTaint(c Opts) (*corev1.Taint, error) {
key = getEnv("VKUBELET_TAINT_KEY", key)
value = getEnv("VKUBELET_TAINT_VALUE", value)
effectEnv := getEnv("VKUBELET_TAINT_EFFECT", string(c.TaintEffect))
effectEnv := getEnv("VKUBELET_TAINT_EFFECT", c.TaintEffect)
var effect corev1.TaintEffect
switch effectEnv {


@@ -15,6 +15,7 @@
package root
import (
"fmt"
"os"
"path/filepath"
"strconv"
@@ -95,6 +96,8 @@ type Opts struct {
Version string
}
const maxInt32 = 1<<31 - 1
// SetDefaultOpts sets default options for unset values on the passed in option struct.
// Fields that are already set will not be modified.
func SetDefaultOpts(c *Opts) error {
@@ -128,6 +131,10 @@ func SetDefaultOpts(c *Opts) error {
if err != nil {
return errors.Wrap(err, "error parsing KUBELET_PORT environment variable")
}
if p > maxInt32 {
return fmt.Errorf("KUBELET_PORT environment variable is too large")
}
/* #nosec */
c.ListenPort = int32(p)
} else {
c.ListenPort = DefaultListenPort


@@ -73,6 +73,13 @@ func runRootCommand(ctx context.Context, s *provider.Store, c Opts) error {
}
}
// Ensure API client.
clientSet, err := nodeutil.ClientsetFromEnv(c.KubeConfigPath)
if err != nil {
return err
}
// Set-up the node provider.
mux := http.NewServeMux()
newProvider := func(cfg nodeutil.ProviderConfig) (nodeutil.Provider, node.NodeProvider, error) {
rm, err := manager.NewResourceManager(cfg.Pods, cfg.Secrets, cfg.ConfigMaps, cfg.Services)
@@ -127,6 +134,7 @@ func runRootCommand(ctx context.Context, s *provider.Store, c Opts) error {
return nil
},
nodeutil.WithClient(clientSet),
setAuth(c.NodeName, apiConfig),
nodeutil.WithTLSConfig(
nodeutil.WithKeyPairFromPath(apiConfig.CertPath, apiConfig.KeyPath),


@@ -21,6 +21,7 @@ import (
"os"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/virtual-kubelet/virtual-kubelet/errdefs"
@@ -105,7 +106,8 @@ func setupZpages(ctx context.Context) {
zpages.Handle(mux, "/debug")
go func() {
// This should never terminate, if it does, it will always terminate with an error
e := http.Serve(listener, mux)
srv := &http.Server{Handler: mux, ReadHeaderTimeout: 30 * time.Second}
e := srv.Serve(listener)
if e == http.ErrServerClosed {
return
}


@@ -19,7 +19,8 @@ import (
"go.opencensus.io/trace"
)
type TracingExporterOptions struct { // nolint: golint
// TracingExporterOptions is the options passed to the tracing exporter init function.
type TracingExporterOptions struct { //nolint: golint
Tags map[string]string
ServiceName string
}


@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !no_jaeger_exporter
// +build !no_jaeger_exporter
package root
import (
"errors"
"fmt"
"os"
"contrib.go.opencensus.io/exporter/jaeger"
@@ -32,7 +32,6 @@ func init() {
// NewJaegerExporter creates a new opencensus tracing exporter.
func NewJaegerExporter(opts TracingExporterOptions) (trace.Exporter, error) {
jOpts := jaeger.Options{
Endpoint: os.Getenv("JAEGER_ENDPOINT"), // deprecated
CollectorEndpoint: os.Getenv("JAEGER_COLLECTOR_ENDPOINT"),
AgentEndpoint: os.Getenv("JAEGER_AGENT_ENDPOINT"),
Username: os.Getenv("JAEGER_USER"),
@@ -42,11 +41,8 @@ func NewJaegerExporter(opts TracingExporterOptions) (trace.Exporter, error) {
},
}
if jOpts.Endpoint != "" && jOpts.CollectorEndpoint == "" { // nolintlint:staticcheck
jOpts.CollectorEndpoint = fmt.Sprintf("%s/api/traces", jOpts.Endpoint) // nolintlint:staticcheck
}
if jOpts.CollectorEndpoint == "" && jOpts.AgentEndpoint == "" { // nolintlint:staticcheck
return nil, errors.New("Must specify either JAEGER_COLLECTOR_ENDPOINT or JAEGER_AGENT_ENDPOINT")
return nil, errors.New("must specify either JAEGER_COLLECTOR_ENDPOINT or JAEGER_AGENT_ENDPOINT")
}
for k, v := range opts.Tags {


@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !no_ocagent_exporter
// +build !no_ocagent_exporter
package root


@@ -5,11 +5,12 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"strings"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/virtual-kubelet/virtual-kubelet/errdefs"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
@@ -42,7 +43,7 @@ var (
*/
// MockProvider implements the virtual-kubelet provider interface and stores pods in memory.
type MockProvider struct { // nolint:golint
type MockProvider struct { //nolint:golint
nodeName string
operatingSystem string
internalIP string
@@ -54,10 +55,12 @@ type MockProvider struct { // nolint:golint
}
// MockConfig contains a mock virtual-kubelet's configurable parameters.
type MockConfig struct { // nolint:golint
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
Pods string `json:"pods,omitempty"`
type MockConfig struct { //nolint:golint
CPU string `json:"cpu,omitempty"`
Memory string `json:"memory,omitempty"`
Pods string `json:"pods,omitempty"`
Others map[string]string `json:"others,omitempty"`
ProviderID string `json:"providerID,omitempty"`
}
// NewMockProviderMockConfig creates a new MockV0Provider. Mock legacy provider does not implement the new asynchronous podnotifier interface
@@ -97,7 +100,7 @@ func NewMockProvider(providerConfig, nodeName, operatingSystem string, internalI
// loadConfig loads the given json configuration files.
func loadConfig(providerConfig, nodeName string) (config MockConfig, err error) {
data, err := ioutil.ReadFile(providerConfig)
data, err := os.ReadFile(providerConfig)
if err != nil {
return config, err
}
@@ -128,6 +131,11 @@ func loadConfig(providerConfig, nodeName string) (config MockConfig, err error)
if _, err = resource.ParseQuantity(config.Pods); err != nil {
return config, fmt.Errorf("Invalid pods value %v", config.Pods)
}
for _, v := range config.Others {
if _, err = resource.ParseQuantity(v); err != nil {
return config, fmt.Errorf("Invalid other value %v", v)
}
}
return config, nil
}
@@ -283,7 +291,7 @@ func (p *MockProvider) GetContainerLogs(ctx context.Context, namespace, podName,
ctx = addAttributes(ctx, span, namespaceKey, namespace, nameKey, podName, containerNameKey, containerName)
log.G(ctx).Infof("receive GetContainerLogs %q", podName)
return ioutil.NopCloser(strings.NewReader("")), nil
return io.NopCloser(strings.NewReader("")), nil
}
// RunInContainer executes a command in a container in the pod, copying data
@@ -293,6 +301,13 @@ func (p *MockProvider) RunInContainer(ctx context.Context, namespace, name, cont
return nil
}
// AttachToContainer attaches to the executing process of a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
func (p *MockProvider) AttachToContainer(ctx context.Context, namespace, name, container string, attach api.AttachIO) error {
log.G(ctx).Infof("receive AttachToContainer %q", container)
return nil
}
// GetPodStatus returns the status of a pod by name that is "running".
// returns nil if a pod by that name is not found.
func (p *MockProvider) GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error) {
@@ -328,10 +343,13 @@ func (p *MockProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) {
return pods, nil
}
func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { // nolint:golint
ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") // nolint:staticcheck,ineffassign
func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { //nolint:golint
ctx, span := trace.StartSpan(ctx, "mock.ConfigureNode") //nolint:staticcheck,ineffassign
defer span.End()
if p.config.ProviderID != "" {
n.Spec.ProviderID = p.config.ProviderID
}
n.Status.Capacity = p.capacity()
n.Status.Allocatable = p.capacity()
n.Status.Conditions = p.nodeConditions()
@@ -349,11 +367,15 @@ func (p *MockProvider) ConfigureNode(ctx context.Context, n *v1.Node) { // nolin
// Capacity returns a resource list containing the capacity limits.
func (p *MockProvider) capacity() v1.ResourceList {
return v1.ResourceList{
rl := v1.ResourceList{
"cpu": resource.MustParse(p.config.CPU),
"memory": resource.MustParse(p.config.Memory),
"pods": resource.MustParse(p.config.Pods),
}
for k, v := range p.config.Others {
rl[v1.ResourceName(k)] = resource.MustParse(v)
}
return rl
}
// NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), for updates to the node status
@@ -467,10 +489,14 @@ func (p *MockProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, err
for _, container := range pod.Spec.Containers {
// Grab a dummy value to be used as the total CPU usage.
// The value should fit a uint32 in order to avoid overflows later on when computing pod stats.
/* #nosec */
dummyUsageNanoCores := uint64(rand.Uint32())
totalUsageNanoCores += dummyUsageNanoCores
// Create a dummy value to be used as the total RAM usage.
// The value should fit a uint32 in order to avoid overflows later on when computing pod stats.
/* #nosec */
dummyUsageBytes := uint64(rand.Uint32())
totalUsageBytes += dummyUsageBytes
// Append a ContainerStats object containing the dummy stats to the PodStats object.
@@ -504,6 +530,129 @@ func (p *MockProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, err
return res, nil
}
func (p *MockProvider) generateMockMetrics(metricsMap map[string][]*dto.Metric, resourceType string, label []*dto.LabelPair) map[string][]*dto.Metric {
var (
cpuMetricSuffix = "_cpu_usage_seconds_total"
memoryMetricSuffix = "_memory_working_set_bytes"
dummyValue = float64(100)
)
if metricsMap == nil {
metricsMap = map[string][]*dto.Metric{}
}
finalCpuMetricName := resourceType + cpuMetricSuffix
finalMemoryMetricName := resourceType + memoryMetricSuffix
newCPUMetric := dto.Metric{
Label: label,
Counter: &dto.Counter{
Value: &dummyValue,
},
}
newMemoryMetric := dto.Metric{
Label: label,
Gauge: &dto.Gauge{
Value: &dummyValue,
},
}
// if metric family exists add to metric array
if cpuMetrics, ok := metricsMap[finalCpuMetricName]; ok {
metricsMap[finalCpuMetricName] = append(cpuMetrics, &newCPUMetric)
} else {
metricsMap[finalCpuMetricName] = []*dto.Metric{&newCPUMetric}
}
if memoryMetrics, ok := metricsMap[finalMemoryMetricName]; ok {
metricsMap[finalMemoryMetricName] = append(memoryMetrics, &newMemoryMetric)
} else {
metricsMap[finalMemoryMetricName] = []*dto.Metric{&newMemoryMetric}
}
return metricsMap
}
func (p *MockProvider) getMetricType(metricName string) *dto.MetricType {
var (
dtoCounterMetricType = dto.MetricType_COUNTER
dtoGaugeMetricType = dto.MetricType_GAUGE
cpuMetricSuffix = "_cpu_usage_seconds_total"
memoryMetricSuffix = "_memory_working_set_bytes"
)
if strings.HasSuffix(metricName, cpuMetricSuffix) {
return &dtoCounterMetricType
}
if strings.HasSuffix(metricName, memoryMetricSuffix) {
return &dtoGaugeMetricType
}
return nil
}
func (p *MockProvider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily, error) {
var span trace.Span
ctx, span = trace.StartSpan(ctx, "GetMetricsResource") //nolint: ineffassign,staticcheck
defer span.End()
var (
nodeNameStr = "NodeName"
podNameStr = "PodName"
containerNameStr = "containerName"
)
nodeLabels := []*dto.LabelPair{
{
Name: &nodeNameStr,
Value: &p.nodeName,
},
}
metricsMap := p.generateMockMetrics(nil, "node", nodeLabels)
for _, pod := range p.pods {
podLabels := []*dto.LabelPair{
{
Name: &nodeNameStr,
Value: &p.nodeName,
},
{
Name: &podNameStr,
Value: &pod.Name,
},
}
metricsMap = p.generateMockMetrics(metricsMap, "pod", podLabels)
for _, container := range pod.Spec.Containers {
containerLabels := []*dto.LabelPair{
{
Name: &nodeNameStr,
Value: &p.nodeName,
},
{
Name: &podNameStr,
Value: &pod.Name,
},
{
Name: &containerNameStr,
Value: &container.Name,
},
}
metricsMap = p.generateMockMetrics(metricsMap, "container", containerLabels)
}
}
res := []*dto.MetricFamily{}
for metricName := range metricsMap {
tempName := metricName
tempMetrics := metricsMap[tempName]
metricFamily := dto.MetricFamily{
Name: &tempName,
Type: p.getMetricType(tempName),
Metric: tempMetrics,
}
res = append(res, &metricFamily)
}
return res, nil
}
// NotifyPods is called to set a pod notifier callback function. This should be called before any operations are done
// within the provider.
func (p *MockProvider) NotifyPods(ctx context.Context, notifier func(*v1.Pod)) {


@@ -13,7 +13,7 @@ type Store struct {
ls map[string]InitFunc
}
func NewStore() *Store { // nolint:golint
func NewStore() *Store { //nolint:golint
return &Store{
ls: make(map[string]InitFunc),
}
@@ -71,4 +71,4 @@ type InitConfig struct {
ResourceManager *manager.ResourceManager
}
type InitFunc func(InitConfig) (Provider, error) // nolint:golint
type InitFunc func(InitConfig) (Provider, error) //nolint:golint


@@ -7,7 +7,7 @@ const (
OperatingSystemWindows = "windows"
)
type OperatingSystems map[string]bool // nolint:golint
type OperatingSystems map[string]bool //nolint:golint
var (
// ValidOperatingSystems defines the group of operating systems
@@ -18,7 +18,7 @@ var (
}
)
func (o OperatingSystems) Names() []string { // nolint:golint
func (o OperatingSystems) Names() []string { //nolint:golint
keys := make([]string, 0, len(o))
for k := range o {
keys = append(keys, k)


@@ -6,6 +6,7 @@ import (
)
func registerMock(s *provider.Store) {
/* #nosec */
s.Register("mock", func(cfg provider.InitConfig) (provider.Provider, error) { //nolint:errcheck
return mock.NewMockProvider(
cfg.ConfigPath,


@@ -0,0 +1,296 @@
# Virtual Kubelet Metrics Update
<!-- toc -->
- [Summary](#summary)
- [Motivation](#motivation)
- [Goals](#goals)
- [Non-Goals](#non-goals)
- [Proposal](#proposal)
- [Design Details](#design-details)
- [API](#api)
- [Data](#data)
- [Changes to the Provider](#changes-to-the-provider)
- [Test Plan](#test-plan)
<!-- /toc -->
## Summary
Add a new /metrics/resource endpoint to virtual-kubelet to support the metrics server update used with Kubernetes versions `>=1.24`.
## Motivation
The Kubernetes metrics server now tries to get metrics from the kubelet using the new metrics endpoint [/metrics/resource](https://github.com/kubernetes-sigs/metrics-server/commit/a2d732e5cdbfd93a6ebce221e8df0e8b463eecc6#diff-6e5b914d1403a14af1cc43582a2c9af727113037a3c6a77d8729aaefba084fb5R88),
while Virtual Kubelet is still exposing the earlier metrics endpoint [/stats/summary](https://github.com/virtual-kubelet/virtual-kubelet/blob/master/node/api/server.go#L90).
This causes metrics to break when using virtual kubelet with newer Kubernetes versions (>=1.24).
To support the new metrics server, this document proposes adding a new handler for the updated metrics endpoint.
This will be an additive update, and the old
[/stats/summary](https://github.com/virtual-kubelet/virtual-kubelet/blob/master/node/api/server.go#L90) endpoint will still be available to maintain backward compatibility with
the older metrics server version.
### Goals
- Support metrics for Kubernetes versions `>=1.24` by adding a /metrics/resource endpoint handler.
### Non-Goals
- Ensure pod autoscaling works as expected with the newer Kubernetes versions `>=1.24`
## Proposal
Add a new handler for the `/metrics/resource` endpoint that calls a new `GetMetricsResource` method on the provider,
which in turn returns metrics using Prometheus' `MetricFamily` data structure, serialized in the text format expected by the new metrics server (see the Data section).
The provider will need to implement the `GetMetricsResource` method in order to support the new `/metrics/resource` endpoint with Kubernetes versions >=1.24.
## Design Details
Currently the virtual-kubelet code uses the `PodStatsSummaryHandler` method to set up an HTTP handler for serving pod metrics via the `/stats/summary` endpoint.
To support the updated metrics server, we need to add another handler, `PodMetricsResourceHandler`, which serves metrics via the `/metrics/resource` endpoint.
`PodMetricsResourceHandler` calls the provider's new `GetMetricsResource` method to fetch the metrics from the specific provider implementation.
### API
Add `GetMetricsResource` to `PodHandlerConfig`
```go
type PodHandlerConfig struct { //nolint:golint
RunInContainer ContainerExecHandlerFunc
GetContainerLogs ContainerLogsHandlerFunc
// GetPods is meant to enumerate the pods that the provider knows about
GetPods PodListerFunc
// GetPodsFromKubernetes is meant to enumerate the pods that the node is meant to be running
GetPodsFromKubernetes PodListerFunc
GetStatsSummary PodStatsSummaryHandlerFunc
GetMetricsResource PodMetricsResourceHandlerFunc
StreamIdleTimeout time.Duration
StreamCreationTimeout time.Duration
}
```
Add endpoint to `PodHandler` method
```go
const MetricsResourceRouteSuffix = "/metrics/resource"
func PodHandler(p PodHandlerConfig, debug bool) http.Handler {
r := mux.NewRouter()
// This matches the behaviour in the reference kubelet
r.StrictSlash(true)
if debug {
r.HandleFunc("/runningpods/", HandleRunningPods(p.GetPods)).Methods("GET")
}
r.HandleFunc("/pods", HandleRunningPods(p.GetPodsFromKubernetes)).Methods("GET")
r.HandleFunc("/containerLogs/{namespace}/{pod}/{container}", HandleContainerLogs(p.GetContainerLogs)).Methods("GET")
r.HandleFunc(
"/exec/{namespace}/{pod}/{container}",
HandleContainerExec(
p.RunInContainer,
WithExecStreamCreationTimeout(p.StreamCreationTimeout),
WithExecStreamIdleTimeout(p.StreamIdleTimeout),
),
).Methods("POST", "GET")
if p.GetStatsSummary != nil {
f := HandlePodStatsSummary(p.GetStatsSummary)
r.HandleFunc("/stats/summary", f).Methods("GET")
r.HandleFunc("/stats/summary/", f).Methods("GET")
}
if p.GetMetricsResource != nil {
f := HandlePodMetricsResource(p.GetMetricsResource)
r.HandleFunc(MetricsResourceRouteSuffix, f).Methods("GET")
r.HandleFunc(MetricsResourceRouteSuffix+"/", f).Methods("GET")
}
r.NotFoundHandler = http.HandlerFunc(NotFound)
return r
}
```
A new `PodMetricsResourceHandler` method that uses the new `PodMetricsResourceHandlerFunc` definition.
```go
// PodMetricsResourceHandler creates an http handler for serving pod metrics.
//
// If the passed in handler func is nil this will create handlers which only
// serves http.StatusNotImplemented
func PodMetricsResourceHandler(f PodMetricsResourceHandlerFunc) http.Handler {
if f == nil {
return http.HandlerFunc(NotImplemented)
}
r := mux.NewRouter()
h := HandlePodMetricsResource(f)
r.Handle(MetricsResourceRouteSuffix, ochttp.WithRouteTag(h, "PodMetricsResourceHandler")).Methods("GET")
r.Handle(MetricsResourceRouteSuffix+"/", ochttp.WithRouteTag(h, "PodMetricsResourceHandler")).Methods("GET")
r.NotFoundHandler = http.HandlerFunc(NotFound)
return r
}
```
The `HandlePodMetricsResource` method returns a `HandlerFunc` that serves the metrics encoded in Prometheus' text format, as expected by the metrics-server.
```go
// HandlePodMetricsResource makes an HTTP handler for implementing the kubelet /metrics/resource endpoint
func HandlePodMetricsResource(h PodMetricsResourceHandlerFunc) http.HandlerFunc {
if h == nil {
return NotImplemented
}
return handleError(func(w http.ResponseWriter, req *http.Request) error {
metrics, err := h(req.Context())
if err != nil {
if isCancelled(err) {
return err
}
return errors.Wrap(err, "error getting status from provider")
}
b, err := json.Marshal(metrics)
if err != nil {
return errors.Wrap(err, "error marshalling metrics")
}
if _, err := w.Write(b); err != nil {
return errors.Wrap(err, "could not write to client")
}
return nil
})
}
```
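The sketch above marshals the metric families to JSON; the later change "Prometheus metrics are encoded as text, not JSON (#1101)" in this compare switches the response to the Prometheus text exposition format instead. A minimal sketch of that encoding step, assuming the `github.com/prometheus/common/expfmt` package (the `writeMetricsAsText` helper and its placement are hypothetical):

```go
package api // hypothetical placement alongside the handlers above

import (
	"net/http"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// writeMetricsAsText encodes the gathered metric families in the Prometheus
// text exposition format, which is what the updated metrics-server parses.
func writeMetricsAsText(w http.ResponseWriter, metrics []*dto.MetricFamily) error {
	w.Header().Set("Content-Type", string(expfmt.FmtText))
	enc := expfmt.NewEncoder(w, expfmt.FmtText)
	for _, mf := range metrics {
		if err := enc.Encode(mf); err != nil {
			return err
		}
	}
	return nil
}
```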
The `PodMetricsResourceHandlerFunc` returns the metrics data using Prometheus' `MetricFamily` data structure. More details are provided in the Data subsection.
```go
// PodMetricsResourceHandlerFunc defines the handler for getting pod metrics
type PodMetricsResourceHandlerFunc func(context.Context) ([]*dto.MetricFamily, error)
```
### Data
The updated metrics server does not add any new fields to the metrics data but uses the Prometheus textparse series parser to parse and reconstruct the [MetricsBatch](https://github.com/kubernetes-sigs/metrics-server/blob/83b2e01f9825849ae5f562e47aa1a4178b5d06e5/pkg/storage/types.go#L31) data structure.
Currently virtual-kubelet is sending data to the server using the [summary](https://github.com/virtual-kubelet/virtual-kubelet/blob/be0a062aec9a5eeea3ad6fbe5aec557a235558f6/node/api/statsv1alpha1/types.go#L24) data structure. The Prometheus text parser expects a series of bytes as in the Prometheus [model.Samples](https://github.com/kubernetes/kubernetes/blob/a93eda9db305611cacd8b6ee930ab3149a08f9b0/vendor/github.com/prometheus/common/model/value.go#L184) data structure, similar to the test [here](https://github.com/prometheus/prometheus/blob/c70d85baed260f6013afd18d6cd0ffcac4339861/model/textparse/promparse_test.go#L31).
Examples of how the new metrics are defined can be seen in the Kubernetes e2e test that calls the /metrics/resource endpoint [here](https://github.com/kubernetes/kubernetes/blob/a93eda9db305611cacd8b6ee930ab3149a08f9b0/test/e2e_node/resource_metrics_test.go#L76), and in the kubelet metrics defined in the Kubernetes kubelet code [here](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/metrics/collectors/resource_metrics.go).
```go
var (
nodeCPUUsageDesc = metrics.NewDesc("node_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the node in core-seconds",
nil,
nil,
metrics.ALPHA,
"")
nodeMemoryUsageDesc = metrics.NewDesc("node_memory_working_set_bytes",
"Current working set of the node in bytes",
nil,
nil,
metrics.ALPHA,
"")
containerCPUUsageDesc = metrics.NewDesc("container_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the container in core-seconds",
[]string{"container", "pod", "namespace"},
nil,
metrics.ALPHA,
"")
containerMemoryUsageDesc = metrics.NewDesc("container_memory_working_set_bytes",
"Current working set of the container in bytes",
[]string{"container", "pod", "namespace"},
nil,
metrics.ALPHA,
"")
podCPUUsageDesc = metrics.NewDesc("pod_cpu_usage_seconds_total",
"Cumulative cpu time consumed by the pod in core-seconds",
[]string{"pod", "namespace"},
nil,
metrics.ALPHA,
"")
podMemoryUsageDesc = metrics.NewDesc("pod_memory_working_set_bytes",
"Current working set of the pod in bytes",
[]string{"pod", "namespace"},
nil,
metrics.ALPHA,
"")
resourceScrapeResultDesc = metrics.NewDesc("scrape_error",
"1 if there was an error while getting container metrics, 0 otherwise",
nil,
nil,
metrics.ALPHA,
"")
containerStartTimeDesc = metrics.NewDesc("container_start_time_seconds",
"Start time of the container since unix epoch in seconds",
[]string{"container", "pod", "namespace"},
nil,
metrics.ALPHA,
"")
)
```
The kubernetes/kubelet code implements Prometheus' [collector](https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/metrics/collectors/resource_metrics.go#L88) interface, which is used along with the k8s.io/component-base implementation of the [registry](https://github.com/kubernetes/component-base/blob/40d14bdbd62f9e2ea697f97d81d4abc72839901e/metrics/registry.go#L114) interface to collect and return the metrics data using Prometheus' [MetricFamily](https://github.com/prometheus/client_model/blob/master/go/metrics.pb.go#L773) data structure.
The registry's Gather method calls the kubelet collector's Collect method and returns the data as MetricFamily values. The metrics-server expects metrics to be encoded in Prometheus' text format, and the kubelet uses the HTTP handler from Prometheus' promhttp module, which serves the metrics in that encoding.
```go
type KubeRegistry interface {
// Deprecated
RawMustRegister(...prometheus.Collector)
// CustomRegister is our internal variant of Prometheus registry.Register
CustomRegister(c StableCollector) error
// CustomMustRegister is our internal variant of Prometheus registry.MustRegister
CustomMustRegister(cs ...StableCollector)
// Register conforms to Prometheus registry.Register
Register(Registerable) error
// MustRegister conforms to Prometheus registry.MustRegister
MustRegister(...Registerable)
// Unregister conforms to Prometheus registry.Unregister
Unregister(collector Collector) bool
// Gather conforms to Prometheus gatherer.Gather
Gather() ([]*dto.MetricFamily, error)
// Reset invokes the Reset() function on all items in the registry
// which are added as resettables.
Reset()
}
```
Prometheus' MetricFamily data structure:
```go
type MetricFamily struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
```
Therefore the provider's GetMetricsResource method should use the same return type as the Gather method in the registry interface.
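As a rough sketch of that approach (assuming the k8s.io/component-base/metrics package and its `NewKubeRegistry` constructor; `newResourceCollector` and `ExampleProvider` are hypothetical stand-ins, not part of this proposal), a provider could register its collectors in a KubeRegistry and return the result of `Gather` directly:
```go
import (
	"context"

	dto "github.com/prometheus/client_model/go"
	compbasemetrics "k8s.io/component-base/metrics"
)

// GetMetricsResource backs the new endpoint with a component-base registry (sketch only).
func (p *ExampleProvider) GetMetricsResource(ctx context.Context) ([]*dto.MetricFamily, error) {
	registry := compbasemetrics.NewKubeRegistry()
	registry.CustomMustRegister(newResourceCollector(p)) // hypothetical StableCollector built by the provider
	return registry.Gather()
}
```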
### Changes to the Provider
In order to support the new metrics endpoint, the Provider must implement the `GetMetricsResource` method with the following definition:
```go
import (
	"context"

	dto "github.com/prometheus/client_model/go"
)

func GetMetricsResource(context.Context) ([]*dto.MetricFamily, error) {
	...
}
```
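With that in place, the route wiring added in this change (see the `node/api` and `nodeutil` diffs below) exposes the method at /metrics/resource. A sketch of the resulting handler configuration, with unrelated fields omitted and `p` assumed to be the provider and `mux` an HTTP mux:
```go
handlerCfg := api.PodHandlerConfig{
	GetStatsSummary:    p.GetStatsSummary,
	GetMetricsResource: p.GetMetricsResource,
	// RunInContainer, AttachToContainer, GetContainerLogs, GetPods, ... omitted for brevity.
}
mux.Handle("/", api.PodHandler(handlerCfg, false))
```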
### Test Plan
- Write a provider implementation of the GetMetricsResource method in the ACI provider, deploy pods, and fetch metrics using kubectl
- Run end-to-end tests with the provider implementation

129
go.mod
View File

@@ -1,34 +1,113 @@
module github.com/virtual-kubelet/virtual-kubelet
go 1.15
go 1.17
require (
contrib.go.opencensus.io/exporter/jaeger v0.1.0
contrib.go.opencensus.io/exporter/ocagent v0.4.12
github.com/bombsimon/logrusr v1.0.0
github.com/docker/spdystream v0.0.0-20170912183627-bc6354cbbc29 // indirect
github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f // indirect
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 // indirect
github.com/google/go-cmp v0.5.2
github.com/gorilla/mux v1.7.3
contrib.go.opencensus.io/exporter/jaeger v0.2.1
contrib.go.opencensus.io/exporter/ocagent v0.7.0
github.com/bombsimon/logrusr/v3 v3.0.0
github.com/google/go-cmp v0.5.9
github.com/gorilla/mux v1.8.0
github.com/mitchellh/go-homedir v1.1.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.7.1
github.com/sirupsen/logrus v1.6.0
github.com/spf13/cobra v1.0.0
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0
github.com/prometheus/common v0.42.0
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
go.opencensus.io v0.22.2
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
google.golang.org/api v0.15.1 // indirect
github.com/stretchr/testify v1.8.0
go.opencensus.io v0.23.0
go.opentelemetry.io/otel v0.20.0
go.opentelemetry.io/otel/sdk v0.20.0
go.opentelemetry.io/otel/trace v0.20.0
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
golang.org/x/sys v0.5.0
golang.org/x/time v0.3.0
google.golang.org/protobuf v1.28.1
gotest.tools v2.2.0+incompatible
k8s.io/api v0.19.10
k8s.io/apimachinery v0.19.10
k8s.io/apiserver v0.19.10
k8s.io/client-go v0.19.10
k8s.io/klog v1.0.0
k8s.io/klog/v2 v2.2.0
k8s.io/utils v0.0.0-20200912215256-4140de9c8800
sigs.k8s.io/controller-runtime v0.7.1
k8s.io/api v0.26.2
k8s.io/apimachinery v0.26.2
k8s.io/apiserver v0.25.0
k8s.io/client-go v0.25.0
k8s.io/klog/v2 v2.90.1
k8s.io/utils v0.0.0-20221107191617-1a15be271d1d
sigs.k8s.io/controller-runtime v0.13.0
)
require (
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/census-instrumentation/opencensus-proto v0.2.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/felixge/httpsnoop v1.0.1 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect
go.etcd.io/etcd/api/v3 v3.5.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.4 // indirect
go.etcd.io/etcd/client/v3 v3.5.4 // indirect
go.opentelemetry.io/contrib v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.7.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/oauth2 v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
google.golang.org/api v0.57.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect
google.golang.org/grpc v1.47.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.25.0 // indirect
k8s.io/component-base v0.25.0 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.32 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

962
go.sum

File diff suppressed because it is too large

View File

@@ -0,0 +1,79 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotecommand
import (
"fmt"
"io"
"net/http"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
remotecommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/remotecommand"
utilexec "k8s.io/utils/exec"
)
// Attacher knows how to attach to a container in a pod.
type Attacher interface {
// AttachToContainer attaches to a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
AttachToContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error
}
// ServeAttach handles requests to attach to a container. After
// creating/receiving the required streams, it delegates the actual attachment
// to the attacher.
func ServeAttach(w http.ResponseWriter, req *http.Request, attacher Attacher, podName string, uid types.UID, container string, streamOpts *Options, idleTimeout, streamCreationTimeout time.Duration, supportedProtocols []string) {
ctx, ok := createStreams(req, w, streamOpts, supportedProtocols, idleTimeout, streamCreationTimeout)
if !ok {
// error is handled by createStreams
return
}
defer ctx.conn.Close()
err := attacher.AttachToContainer(podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0)
if err != nil {
if exitErr, ok := err.(utilexec.ExitError); ok && exitErr.Exited() {
rc := exitErr.ExitStatus()
ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{
Status: metav1.StatusFailure,
Reason: remotecommandconsts.NonZeroExitCodeReason,
Details: &metav1.StatusDetails{
Causes: []metav1.StatusCause{
{
Type: remotecommandconsts.ExitCodeCauseType,
Message: fmt.Sprintf("%d", rc),
},
},
},
Message: fmt.Sprintf("command terminated with non-zero exit code: %v", exitErr),
}})
return
}
err = fmt.Errorf("error attaching to container: %v", err)
runtime.HandleError(err)
ctx.writeStatus(apierrors.NewInternalError(err))
return
}
ctx.writeStatus(&apierrors.StatusError{ErrStatus: metav1.Status{
Status: metav1.StatusSuccess,
}})
}

View File

@@ -123,7 +123,7 @@ func TestGetConfigMap(t *testing.T) {
}
value := configMap.Data["key-0"]
if value != "val-0" {
t.Fatal("got unexpected value", string(value))
t.Fatal("got unexpected value", value)
}
// Try to get a configmap that does not exist, and make sure we've got a "not found" error as a response.

View File

@@ -334,7 +334,7 @@ func getEnvironmentVariableValue(ctx context.Context, env *corev1.EnvVar, mappin
return getEnvironmentVariableValueWithValueFrom(ctx, env, mappingFunc, pod, container, rm, recorder)
}
// Handle values that have been directly provided after expanding variable references.
return pointer.StringPtr(expansion.Expand(env.Value, mappingFunc)), nil
return pointer.String(expansion.Expand(env.Value, mappingFunc)), nil
}
func getEnvironmentVariableValueWithValueFrom(ctx context.Context, env *corev1.EnvVar, mappingFunc func(string) string, pod *corev1.Pod, container *corev1.Container, rm *manager.ResourceManager, recorder record.EventRecorder) (*string, error) {
@@ -411,7 +411,7 @@ func getEnvironmentVariableValueWithValueFromConfigMapKeyRef(ctx context.Context
return nil, fmt.Errorf("configmap %q doesn't contain the %q key required by pod %s", vf.Name, vf.Key, pod.Name)
}
// Populate the environment variable and continue on to the next reference.
return pointer.StringPtr(keyValue), nil
return pointer.String(keyValue), nil
}
func getEnvironmentVariableValueWithValueFromSecretKeyRef(ctx context.Context, env *corev1.EnvVar, mappingFunc func(string) string, pod *corev1.Pod, container *corev1.Container, rm *manager.ResourceManager, recorder record.EventRecorder) (*string, error) {
@@ -463,7 +463,7 @@ func getEnvironmentVariableValueWithValueFromSecretKeyRef(ctx context.Context, e
return nil, fmt.Errorf("secret %q doesn't contain the %q key required by pod %s", vf.Name, vf.Key, pod.Name)
}
// Populate the environment variable and continue on to the next reference.
return pointer.StringPtr(string(keyValue)), nil
return pointer.String(string(keyValue)), nil
}
// Handle population from a field (downward API).
@@ -476,7 +476,7 @@ func getEnvironmentVariableValueWithValueFromFieldRef(ctx context.Context, env *
return nil, err
}
return pointer.StringPtr(runtimeVal), nil
return pointer.String(runtimeVal), nil
}
// podFieldSelectorRuntimeValue returns the runtime value of the given

View File

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
api "github.com/virtual-kubelet/virtual-kubelet/node/api"
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
"k8s.io/apimachinery/pkg/util/net"
)
@@ -29,3 +30,21 @@ func (f *Framework) GetStatsSummary(ctx context.Context) (*stats.Summary, error)
}
return res, nil
}
// GetMetricsResource queries the /metrics/resource endpoint of the virtual-kubelet and returns the raw response bytes.
func (f *Framework) GetMetricsResource(ctx context.Context) ([]byte, error) {
// Query the /metrics/resource endpoint.
b, err := f.KubeClient.CoreV1().
RESTClient().
Get().
Namespace(f.Namespace).
Resource("pods").
SubResource("proxy").
Name(net.JoinSchemeNamePort("https", f.NodeName, "10250")).
Suffix(api.MetricsResourceRouteSuffix).DoRaw(ctx)
if err != nil {
return nil, err
}
return b, nil
}

View File

@@ -4,7 +4,7 @@
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

139
node/api/attach.go Normal file
View File

@@ -0,0 +1,139 @@
// Copyright © 2017 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"context"
"io"
"net/http"
"strings"
"time"
"github.com/gorilla/mux"
"github.com/virtual-kubelet/virtual-kubelet/errdefs"
"github.com/virtual-kubelet/virtual-kubelet/internal/kubernetes/remotecommand"
"k8s.io/apimachinery/pkg/types"
remoteutils "k8s.io/client-go/tools/remotecommand"
)
// ContainerAttachHandlerFunc defines the handler function used for "execing" into a
// container in a pod.
type ContainerAttachHandlerFunc func(ctx context.Context, namespace, podName, containerName string, attach AttachIO) error
// HandleContainerAttach makes an http handler func from a Provider which execs a command in a pod's container
// Note that this handler currently depends on gorrilla/mux to get url parts as variables.
// TODO(@cpuguy83): don't force gorilla/mux on consumers of this function
func HandleContainerAttach(h ContainerAttachHandlerFunc, opts ...ContainerExecHandlerOption) http.HandlerFunc {
if h == nil {
return NotImplemented
}
var cfg ContainerExecHandlerConfig
for _, o := range opts {
o(&cfg)
}
if cfg.StreamIdleTimeout == 0 {
cfg.StreamIdleTimeout = 30 * time.Second
}
if cfg.StreamCreationTimeout == 0 {
cfg.StreamCreationTimeout = 30 * time.Second
}
return handleError(func(w http.ResponseWriter, req *http.Request) error {
vars := mux.Vars(req)
namespace := vars["namespace"]
pod := vars["pod"]
container := vars["container"]
supportedStreamProtocols := strings.Split(req.Header.Get("X-Stream-Protocol-Version"), ",")
streamOpts, err := getExecOptions(req)
if err != nil {
return errdefs.AsInvalidInput(err)
}
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
attach := &containerAttachContext{ctx: ctx, h: h, pod: pod, namespace: namespace, container: container}
remotecommand.ServeAttach(
w,
req,
attach,
"",
"",
container,
streamOpts,
cfg.StreamIdleTimeout,
cfg.StreamCreationTimeout,
supportedStreamProtocols,
)
return nil
})
}
type containerAttachContext struct {
h ContainerAttachHandlerFunc
namespace, pod, container string
ctx context.Context
}
// AttachToContainer Implements remotecommand.Attacher
// This is called by remotecommand.ServeAttach
func (c *containerAttachContext) AttachToContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteutils.TerminalSize, timeout time.Duration) error {
eio := &execIO{
tty: tty,
stdin: in,
stdout: out,
stderr: err,
}
if tty {
eio.chResize = make(chan TermSize)
}
ctx, cancel := context.WithCancel(c.ctx)
defer cancel()
if tty {
go func() {
send := func(s remoteutils.TerminalSize) bool {
select {
case eio.chResize <- TermSize{Width: s.Width, Height: s.Height}:
return false
case <-ctx.Done():
return true
}
}
for {
select {
case s := <-resize:
if send(s) {
return
}
case <-ctx.Done():
return
}
}
}()
}
return c.h(c.ctx, c.namespace, c.pod, c.container, eio)
}

View File

@@ -33,7 +33,9 @@ func handleError(f handlerFunc) http.HandlerFunc {
code := httpStatusCode(err)
w.WriteHeader(code)
io.WriteString(w, err.Error()) //nolint:errcheck
if _, err := io.WriteString(w, err.Error()); err != nil {
log.G(req.Context()).WithError(err).Error("error writing error response")
}
logger := log.G(req.Context()).WithError(err).WithField("httpStatusCode", code)
if code >= 500 {

67
node/api/metrics.go Normal file
View File

@@ -0,0 +1,67 @@
// Copyright © 2017 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"bytes"
"context"
"net/http"
"github.com/pkg/errors"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
)
const (
PrometheusTextFormatContentType = "text/plain; version=0.0.4"
)
// PodMetricsResourceHandlerFunc defines the handler for getting pod metrics
type PodMetricsResourceHandlerFunc func(context.Context) ([]*dto.MetricFamily, error)
// HandlePodMetricsResource makes an HTTP handler for implementing the kubelet /metrics/resource endpoint
func HandlePodMetricsResource(h PodMetricsResourceHandlerFunc) http.HandlerFunc {
if h == nil {
return NotImplemented
}
return handleError(func(w http.ResponseWriter, req *http.Request) error {
metrics, err := h(req.Context())
if err != nil {
if isCancelled(err) {
return err
}
return errors.Wrap(err, "error getting status from provider")
}
// Convert metrics to Prometheus text format.
var buffer bytes.Buffer
enc := expfmt.NewEncoder(&buffer, expfmt.FmtText)
for _, mf := range metrics {
if err := enc.Encode(mf); err != nil {
return errors.Wrap(err, "could not convert metrics to prometheus text format")
}
}
// Set the response content type to "text/plain; version=0.0.4".
w.Header().Set("Content-Type", PrometheusTextFormatContentType)
// Write the metrics in Prometheus text format to the response writer.
if _, err := w.Write(buffer.Bytes()); err != nil {
return errors.Wrap(err, "could not write to client")
}
return nil
})
}

149
node/api/metrics_test.go Normal file
View File

@@ -0,0 +1,149 @@
// Copyright © 2017 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api_test
import (
"context"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/pkg/errors"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"google.golang.org/protobuf/proto"
)
const (
prometheusContentType = "text/plain; version=0.0.4"
)
func TestHandlePodMetricsResource(t *testing.T) {
testCases := []struct {
name string
handler api.PodMetricsResourceHandlerFunc
expectedStatusCode int
expectedError error
}{
{
name: "Valid PodMetricsResourceHandlerFunc",
handler: func(_ context.Context) ([]*dto.MetricFamily, error) {
// Create the expected metrics.
cpuUsageMetric := &dto.MetricFamily{
Name: proto.String("container_cpu_usage_seconds_total"),
Help: proto.String("[ALPHA] Cumulative cpu time consumed by the container in core-seconds"),
Type: dto.MetricType_GAUGE.Enum(),
Metric: []*dto.Metric{},
}
memoryUsageMetric := &dto.MetricFamily{
Name: proto.String("container_memory_working_set_bytes"),
Help: proto.String("[ALPHA] Current working set of the container in bytes"),
Type: dto.MetricType_GAUGE.Enum(),
Metric: []*dto.Metric{},
}
// Add the sample metrics to the metric families.
cpuUsageMetric.Metric = append(cpuUsageMetric.Metric, createSampleMetric(
map[string]string{
"container": "simple-hello-world-container",
"namespace": "k8se-apps",
"pod": "test-pod--zruwatj-86454fdc54-2wwpw",
},
0.1104636, 1680536423102,
))
cpuUsageMetric.Metric = append(cpuUsageMetric.Metric, createSampleMetric(
map[string]string{
"container": "simple-hello-world-container",
"namespace": "k8se-apps",
"pod": "test-pod--zruwatj-86454fdc54-4mzd4",
},
0.11322, 1680536423103,
))
memoryUsageMetric.Metric = append(memoryUsageMetric.Metric, createSampleMetric(
map[string]string{
"container": "simple-hello-world-container",
"namespace": "k8se-apps",
"pod": "test-pod--zruwatj-86454fdc54-2wwpw",
},
2.3277568e+07, 1680536423102,
))
memoryUsageMetric.Metric = append(memoryUsageMetric.Metric, createSampleMetric(
map[string]string{
"container": "simple-hello-world-container",
"namespace": "k8se-apps",
"pod": "test-pod--zruwatj-86454fdc54-4mzd4",
},
2.2450176e+07, 1680536423104,
))
return []*dto.MetricFamily{cpuUsageMetric, memoryUsageMetric}, nil
},
expectedStatusCode: http.StatusOK,
},
{
name: "Nil PodMetricsResourceHandlerFunc",
handler: nil,
expectedStatusCode: http.StatusNotImplemented,
},
{
name: "Error in PodMetricsResourceHandlerFunc",
handler: func(_ context.Context) ([]*dto.MetricFamily, error) {
return nil, errors.New("test error")
},
expectedStatusCode: http.StatusInternalServerError,
expectedError: errors.New("error getting status from provider: test error"),
},
// Add more test cases as needed
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
h := api.HandlePodMetricsResource(tc.handler)
require.NotNil(t, h)
rr := httptest.NewRecorder()
req, err := http.NewRequest("GET", "/metrics/resource", nil)
require.NoError(t, err)
h.ServeHTTP(rr, req)
assert.Equal(t, tc.expectedStatusCode, rr.Code)
if tc.expectedError != nil {
bodyBytes, err := ioutil.ReadAll(rr.Body)
require.NoError(t, err)
assert.Contains(t, string(bodyBytes), tc.expectedError.Error())
} else if tc.expectedStatusCode == http.StatusOK {
contentType := rr.Header().Get("Content-Type")
assert.Equal(t, prometheusContentType, contentType)
}
})
}
}
func createSampleMetric(labels map[string]string, value float64, timestamp int64) *dto.Metric {
labelPairs := []*dto.LabelPair{}
for k, v := range labels {
labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(k),
Value: proto.String(v),
})
}
return &dto.Metric{Label: labelPairs, Gauge: &dto.Gauge{Value: &value}, TimestampMs: &timestamp}
}

View File

@@ -24,14 +24,15 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer"
)
type PodListerFunc func(context.Context) ([]*v1.Pod, error) // nolint:golint
type PodListerFunc func(context.Context) ([]*v1.Pod, error) //nolint:golint
func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc { // nolint:golint
func HandleRunningPods(getPods PodListerFunc) http.HandlerFunc { //nolint:golint
if getPods == nil {
return NotImplemented
}
scheme := runtime.NewScheme()
/* #nosec */
v1.SchemeBuilder.AddToScheme(scheme) //nolint:errcheck
codecs := serializer.NewCodecFactory(scheme)

View File

@@ -33,18 +33,22 @@ type ServeMux interface {
Handle(path string, h http.Handler)
}
type PodHandlerConfig struct { // nolint:golint
RunInContainer ContainerExecHandlerFunc
GetContainerLogs ContainerLogsHandlerFunc
type PodHandlerConfig struct { //nolint:golint
RunInContainer ContainerExecHandlerFunc
AttachToContainer ContainerAttachHandlerFunc
GetContainerLogs ContainerLogsHandlerFunc
// GetPods is meant to enumerate the pods that the provider knows about
GetPods PodListerFunc
// GetPodsFromKubernetes is meant to enumerate the pods that the node is meant to be running
GetPodsFromKubernetes PodListerFunc
GetStatsSummary PodStatsSummaryHandlerFunc
GetMetricsResource PodMetricsResourceHandlerFunc
StreamIdleTimeout time.Duration
StreamCreationTimeout time.Duration
}
const MetricsResourceRouteSuffix = "/metrics/resource"
// PodHandler creates an http handler for interacting with pods/containers.
func PodHandler(p PodHandlerConfig, debug bool) http.Handler {
r := mux.NewRouter()
@@ -65,6 +69,14 @@ func PodHandler(p PodHandlerConfig, debug bool) http.Handler {
WithExecStreamIdleTimeout(p.StreamIdleTimeout),
),
).Methods("POST", "GET")
r.HandleFunc(
"/attach/{namespace}/{pod}/{container}",
HandleContainerAttach(
p.AttachToContainer,
WithExecStreamCreationTimeout(p.StreamCreationTimeout),
WithExecStreamIdleTimeout(p.StreamIdleTimeout),
),
).Methods("POST", "GET")
if p.GetStatsSummary != nil {
f := HandlePodStatsSummary(p.GetStatsSummary)
@@ -72,6 +84,11 @@ func PodHandler(p PodHandlerConfig, debug bool) http.Handler {
r.HandleFunc("/stats/summary/", f).Methods("GET")
}
if p.GetMetricsResource != nil {
f := HandlePodMetricsResource(p.GetMetricsResource)
r.HandleFunc(MetricsResourceRouteSuffix, f).Methods("GET")
r.HandleFunc(MetricsResourceRouteSuffix+"/", f).Methods("GET")
}
r.NotFoundHandler = http.HandlerFunc(NotFound)
return r
}
@@ -79,7 +96,7 @@ func PodHandler(p PodHandlerConfig, debug bool) http.Handler {
// PodStatsSummaryHandler creates an http handler for serving pod metrics.
//
// If the passed in handler func is nil this will create handlers which only
// serves http.StatusNotImplemented
// serves http.StatusNotImplemented
func PodStatsSummaryHandler(f PodStatsSummaryHandlerFunc) http.Handler {
if f == nil {
return http.HandlerFunc(NotImplemented)
@@ -97,6 +114,26 @@ func PodStatsSummaryHandler(f PodStatsSummaryHandlerFunc) http.Handler {
return r
}
// PodMetricsResourceHandler creates an http handler for serving pod metrics.
//
// If the passed in handler func is nil this will create handlers which only
// serves http.StatusNotImplemented
func PodMetricsResourceHandler(f PodMetricsResourceHandlerFunc) http.Handler {
if f == nil {
return http.HandlerFunc(NotImplemented)
}
r := mux.NewRouter()
h := HandlePodMetricsResource(f)
r.Handle(MetricsResourceRouteSuffix, ochttp.WithRouteTag(h, "PodMetricsResourceHandler")).Methods("GET")
r.Handle(MetricsResourceRouteSuffix+"/", ochttp.WithRouteTag(h, "PodMetricsResourceHandler")).Methods("GET")
r.NotFoundHandler = http.HandlerFunc(NotFound)
return r
}
// AttachPodRoutes adds the http routes for pod stuff to the passed in serve mux.
//
// Callers should take care to namespace the serve mux as they see fit, however
@@ -111,7 +148,8 @@ func AttachPodRoutes(p PodHandlerConfig, mux ServeMux, debug bool) {
// The main reason for this struct is in case of expansion we do not need to break
// the package level API.
type PodMetricsConfig struct {
GetStatsSummary PodStatsSummaryHandlerFunc
GetStatsSummary PodStatsSummaryHandlerFunc
GetMetricsResource PodMetricsResourceHandlerFunc
}
// AttachPodMetricsRoutes adds the http routes for pod/node metrics to the passed in serve mux.
@@ -120,6 +158,7 @@ type PodMetricsConfig struct {
// these routes get called by the Kubernetes API server.
func AttachPodMetricsRoutes(p PodMetricsConfig, mux ServeMux) {
mux.Handle("/", InstrumentHandler(HandlePodStatsSummary(p.GetStatsSummary)))
mux.Handle("/", InstrumentHandler(HandlePodMetricsResource(p.GetMetricsResource)))
}
func instrumentRequest(r *http.Request) *http.Request {

View File

@@ -7,7 +7,7 @@ import (
"testing"
"time"
"github.com/bombsimon/logrusr"
"github.com/bombsimon/logrusr/v3"
"github.com/sirupsen/logrus"
"github.com/virtual-kubelet/virtual-kubelet/log"
logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus"
@@ -18,7 +18,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/client-go/kubernetes"
klogv2 "k8s.io/klog/v2"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/envtest"
)
@@ -65,7 +65,7 @@ func wrapE2ETest(ctx context.Context, env *envtest.Environment, f func(context.C
// The following requires that E2E tests are performed *sequentially*
log.L = logger
klogv2.SetLogger(logrusr.NewLogger(sl))
klog.SetLogger(logrusr.New(sl))
f(ctx, t, env)
}
}

View File

@@ -275,8 +275,8 @@ func (c *leaseController) newLease(ctx context.Context, node *corev1.Node, base
Namespace: corev1.NamespaceNodeLease,
},
Spec: coordinationv1.LeaseSpec{
HolderIdentity: pointer.StringPtr(node.Name),
LeaseDurationSeconds: pointer.Int32Ptr(c.leaseDurationSeconds),
HolderIdentity: pointer.String(node.Name),
LeaseDurationSeconds: pointer.Int32(c.leaseDurationSeconds),
},
}
} else {
@@ -333,7 +333,7 @@ func (e *nodeNotReadyError) Is(target error) bool {
return ok
}
func (e *nodeNotReadyError) As(target error) bool {
func (e *nodeNotReadyError) As(target interface{}) bool {
val, ok := target.(*nodeNotReadyError)
if ok {
*val = *e

View File

@@ -28,7 +28,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
watchutils "k8s.io/client-go/tools/watch"
"k8s.io/klog"
"k8s.io/klog/v2"
)
var (
@@ -38,7 +38,7 @@ var (
const (
// There might be a constant we can already leverage here
testNamespace = "default"
informerResyncPeriod = time.Duration(1 * time.Second)
informerResyncPeriod = 1 * time.Second
testNodeName = "testnode"
podSyncWorkers = 3
)
@@ -232,7 +232,7 @@ type system struct {
}
func (s *system) start(ctx context.Context) error {
go s.pc.Run(ctx, podSyncWorkers) // nolint:errcheck
go s.pc.Run(ctx, podSyncWorkers) //nolint:errcheck
select {
case <-s.pc.Ready():
case <-s.pc.Done():

View File

@@ -6,14 +6,14 @@ import (
"os"
"testing"
klogv1 "k8s.io/klog"
"k8s.io/klog/v2"
)
var enableEnvTest = flag.Bool("envtest", false, "Enable envtest based tests")
func TestMain(m *testing.M) {
flagset := flag.NewFlagSet("klog", flag.PanicOnError)
klogv1.InitFlags(flagset)
klog.InitFlags(flagset)
flagset.VisitAll(func(f *flag.Flag) {
flag.Var(f.Value, "klog."+f.Name, f.Usage)
})

View File

@@ -54,7 +54,7 @@ var (
//
// Note: Implementers can choose to manage a node themselves, in which case
// it is not needed to provide an implementation for this interface.
type NodeProvider interface { // nolint:golint
type NodeProvider interface { //nolint:revive
// Ping checks if the node is still active.
// This is intended to be lightweight as it will be called periodically as a
// heartbeat to keep the node marked as ready in Kubernetes.
@@ -105,7 +105,7 @@ func NewNodeController(p NodeProvider, node *corev1.Node, nodes v1.NodeInterface
}
// NodeControllerOpt are the functional options used for configuring a node
type NodeControllerOpt func(*NodeController) error // nolint:golint
type NodeControllerOpt func(*NodeController) error //nolint:revive
// WithNodeEnableLeaseV1 enables support for v1 leases.
// V1 Leases share all the same properties as v1beta1 leases, except they do not fallback like
@@ -208,7 +208,7 @@ type ErrorHandler func(context.Context, error) error
// NodeController deals with creating and managing a node object in Kubernetes.
// It can register a node with Kubernetes and periodically update its status.
// NodeController manages a single node entity.
type NodeController struct { // nolint:golint
type NodeController struct { //nolint:revive
p NodeProvider
// serverNode must be updated each time it is updated in API Server
@@ -685,10 +685,9 @@ func (t taintsStringer) String() string {
func addNodeAttributes(ctx context.Context, span trace.Span, n *corev1.Node) context.Context {
return span.WithFields(ctx, log.Fields{
"node.UID": string(n.UID),
"node.name": n.Name,
"node.cluster": n.ClusterName,
"node.taints": taintsStringer(n.Spec.Taints),
"node.UID": string(n.UID),
"node.name": n.Name,
"node.taints": taintsStringer(n.Spec.Taints),
})
}

View File

@@ -71,7 +71,7 @@ func testNodeRun(t *testing.T, enableLease bool) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
@@ -189,7 +189,7 @@ func TestNodeCustomUpdateStatusErrorHandler(t *testing.T) {
)
assert.NilError(t, err)
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
timer := time.NewTimer(10 * time.Second)
defer timer.Stop()
@@ -295,7 +295,7 @@ func TestPingAfterStatusUpdate(t *testing.T) {
node, err := NewNodeController(testP, testNode, nodes, opts...)
assert.NilError(t, err)
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
defer func() {
cancel()
<-node.Done()
@@ -363,7 +363,7 @@ func TestBeforeAnnotationsPreserved(t *testing.T) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
@@ -427,7 +427,7 @@ func TestManualConditionsPreserved(t *testing.T) {
assert.NilError(t, node.Err())
}()
go node.Run(ctx) // nolint:errcheck
go node.Run(ctx) //nolint:errcheck
nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()

View File

@@ -14,6 +14,7 @@ import (
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/authorization/authorizerfactory"
"k8s.io/apiserver/pkg/server/options"
"k8s.io/client-go/kubernetes"
)
@@ -33,7 +34,7 @@ type authWrapper struct {
// InstrumentAuth wraps the provided Auth in a new instrumented Auth
//
// Note: You would only need this if you rolled your own auth.
// The Auth implementations defined in this package are already instrumented.
// The Auth implementations defined in this package are already instrumented.
func InstrumentAuth(auth Auth) Auth {
if _, ok := auth.(*authWrapper); ok {
// This is already instrumented
@@ -114,13 +115,13 @@ type WebhookAuthConfig struct {
func WebhookAuth(client kubernetes.Interface, nodeName string, opts ...WebhookAuthOption) (Auth, error) {
cfg := WebhookAuthConfig{
AuthnConfig: authenticatorfactory.DelegatingAuthenticatorConfig{
CacheTTL: 2 * time.Minute, // default taken from k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1
// TODO: After upgrading k8s libs, we need to add the retry backoff option
CacheTTL: 2 * time.Minute, // default taken from k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1
WebhookRetryBackoff: options.DefaultAuthWebhookRetryBackoff(),
},
AuthzConfig: authorizerfactory.DelegatingAuthorizerConfig{
AllowCacheTTL: 5 * time.Minute, // default taken from k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1
DenyCacheTTL: 30 * time.Second, // default taken from k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1
// TODO: After upgrading k8s libs, we need to add the retry backoff option
AllowCacheTTL: 5 * time.Minute, // default taken from k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1
DenyCacheTTL: 30 * time.Second, // default taken from k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1
WebhookRetryBackoff: options.DefaultAuthWebhookRetryBackoff(),
},
}
@@ -130,8 +131,8 @@ func WebhookAuth(client kubernetes.Interface, nodeName string, opts ...WebhookAu
}
}
cfg.AuthnConfig.TokenAccessReviewClient = client.AuthenticationV1().TokenReviews()
cfg.AuthzConfig.SubjectAccessReviewClient = client.AuthorizationV1().SubjectAccessReviews()
cfg.AuthnConfig.TokenAccessReviewClient = client.AuthenticationV1()
cfg.AuthzConfig.SubjectAccessReviewClient = client.AuthorizationV1()
authn, _, err := cfg.AuthnConfig.New()
if err != nil {

View File

@@ -78,12 +78,14 @@ func (n *Node) runHTTP(ctx context.Context) (func(), error) {
log.G(ctx).Debug("Started TLS listener")
srv := &http.Server{Handler: n.h, TLSConfig: n.tlsConfig}
srv := &http.Server{Handler: n.h, TLSConfig: n.tlsConfig, ReadHeaderTimeout: 30 * time.Second}
go srv.Serve(l) //nolint:errcheck
log.G(ctx).Debug("HTTP server running")
return func() {
/* #nosec */
srv.Close()
/* #nosec */
l.Close()
}, nil
}
@@ -275,7 +277,6 @@ func WithClient(c kubernetes.Interface) NodeOpt {
// Some basic values are set for node status, you'll almost certainly want to modify it.
//
// If client is nil, this will construct a client using ClientsetFromEnv
//
// It is up to the caller to configure auth on the HTTP handler.
func NewNode(name string, newProvider NewProviderFunc, opts ...NodeOpt) (*Node, error) {
cfg := NodeConfig{
@@ -305,6 +306,8 @@ func NewNode(name string, newProvider NewProviderFunc, opts ...NodeOpt) (*Node,
},
}
cfg.Client = defaultClientFromEnv(cfg.KubeconfigPath)
for _, o := range opts {
if err := o(&cfg); err != nil {
return nil, err
@@ -316,11 +319,7 @@ func NewNode(name string, newProvider NewProviderFunc, opts ...NodeOpt) (*Node,
}
if cfg.Client == nil {
var err error
cfg.Client, err = ClientsetFromEnv(cfg.KubeconfigPath)
if err != nil {
return nil, errors.Wrap(err, "error creating clientset from env")
}
return nil, errors.New("no client provided")
}
podInformerFactory := informers.NewSharedInformerFactoryWithOptions(
@@ -428,3 +427,12 @@ func setNodeReady(n *v1.Node) {
return
}
}
func defaultClientFromEnv(kubeconfigPath string) kubernetes.Interface {
client, err := ClientsetFromEnv(kubeconfigPath)
if err != nil {
log.G(context.TODO()).WithError(err).
Warn("Failed to create clientset from env. Ignore this error If you use your own client")
}
return client
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"io"
dto "github.com/prometheus/client_model/go"
"github.com/virtual-kubelet/virtual-kubelet/node"
"github.com/virtual-kubelet/virtual-kubelet/node/api"
"github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
@@ -27,8 +28,15 @@ type Provider interface {
// between in/out/err and the container's stdin/stdout/stderr.
RunInContainer(ctx context.Context, namespace, podName, containerName string, cmd []string, attach api.AttachIO) error
// AttachToContainer attaches to the executing process of a container in the pod, copying data
// between in/out/err and the container's stdin/stdout/stderr.
AttachToContainer(ctx context.Context, namespace, podName, containerName string, attach api.AttachIO) error
// GetStatsSummary gets the stats for the node, including running pods
GetStatsSummary(context.Context) (*statsv1alpha1.Summary, error)
// GetMetricsResource gets the metrics for the node, including running pods
GetMetricsResource(context.Context) ([]*dto.MetricFamily, error)
}
// ProviderConfig holds objects created by NewNodeFromClient that a provider may need to bootstrap itself.
@@ -54,13 +62,15 @@ func AttachProviderRoutes(mux api.ServeMux) NodeOpt {
return func(cfg *NodeConfig) error {
cfg.routeAttacher = func(p Provider, cfg NodeConfig, pods corev1listers.PodLister) {
mux.Handle("/", api.PodHandler(api.PodHandlerConfig{
RunInContainer: p.RunInContainer,
GetContainerLogs: p.GetContainerLogs,
GetPods: p.GetPods,
RunInContainer: p.RunInContainer,
AttachToContainer: p.AttachToContainer,
GetContainerLogs: p.GetContainerLogs,
GetPods: p.GetPods,
GetPodsFromKubernetes: func(context.Context) ([]*v1.Pod, error) {
return pods.List(labels.Everything())
},
GetStatsSummary: p.GetStatsSummary,
GetMetricsResource: p.GetMetricsResource,
StreamIdleTimeout: cfg.StreamIdleTimeout,
StreamCreationTimeout: cfg.StreamCreationTimeout,
}, true))

View File

@@ -4,7 +4,7 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
)
// WithTLSConfig returns a NodeOpt which creates a base TLSConfig with the default cipher suites and tls min verions.
@@ -31,7 +31,7 @@ func WithTLSConfig(opts ...func(*tls.Config) error) NodeOpt {
// WithCAFromPath makes a TLS config option to set up client auth using the path to a PEM encoded CA cert.
func WithCAFromPath(p string) func(*tls.Config) error {
return func(cfg *tls.Config) error {
pem, err := ioutil.ReadFile(p)
pem, err := os.ReadFile(p)
if err != nil {
return fmt.Errorf("error reading ca cert pem: %w", err)
}

View File

@@ -360,7 +360,7 @@ func TestReCreatePodRace(t *testing.T) {
return true, nil, errors.NewConflict(schema.GroupResource{Group: "", Resource: "pods"}, "nginx", fmt.Errorf("test conflict"))
})
c.client.AddReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
name := action.(core.DeleteAction).GetName()
name := action.(core.GetAction).GetName()
t.Logf("get pod %s", name)
return true, podCopy, nil
})
@@ -394,7 +394,7 @@ func TestReCreatePodRace(t *testing.T) {
})
c.client.AddReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
name := action.(core.DeleteAction).GetName()
name := action.(core.GetAction).GetName()
t.Logf("get pod %s", name)
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "pods"}, "nginx")
})
@@ -430,7 +430,7 @@ func TestReCreatePodRace(t *testing.T) {
})
c.client.AddReactor("get", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
name := action.(core.DeleteAction).GetName()
name := action.(core.GetAction).GetName()
t.Logf("get pod %s", name)
return true, nil, errors.NewNotFound(schema.GroupResource{Group: "", Resource: "pods"}, "nginx")
})

View File

@@ -1,11 +1,13 @@
package e2e
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/prometheus/common/expfmt"
"github.com/virtual-kubelet/virtual-kubelet/internal/podutils"
stats "github.com/virtual-kubelet/virtual-kubelet/node/api/statsv1alpha1"
"gotest.tools/assert"
@@ -111,6 +113,72 @@ func (ts *EndToEndTestSuite) TestGetStatsSummary(t *testing.T) {
}
}
// TestGetMetricsResource creates a pod having three containers and queries the /metrics/resource endpoint of the virtual-kubelet.
// It expects this endpoint to return stats for the current node, as well as for the aforementioned pod and each of its three containers.
func (ts *EndToEndTestSuite) TestGetMetricsResource(t *testing.T) {
ctx := context.Background()
// Create a pod with prefix "nginx-" having three containers.
pod, err := f.CreatePod(ctx, f.CreateDummyPodObjectWithPrefix(t.Name(), "nginx", "foo", "bar", "baz"))
if err != nil {
t.Fatal(err)
}
// Delete the "nginx-0-X" pod after the test finishes.
defer func() {
if err := f.DeletePodImmediately(ctx, pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
t.Error(err)
}
}()
// Wait for the "nginx-" pod to be reported as running and ready.
if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
t.Fatal(err)
}
// Grab the stats from the provider.
metricsResourceResponse, err := f.GetMetricsResource(ctx)
if err != nil {
t.Fatal(err)
}
// decode metrics response bytes to metric family
reader := bytes.NewReader(metricsResourceResponse)
parser := expfmt.TextParser{}
metricsFamilyMap, err := parser.TextToMetricFamilies(reader)
if err != nil {
t.Fatal(err)
}
// Make sure the "nginx-" pod exists in the metrics returned.
currentContainerStatsCount := 0
found := false
for metricName, metricFamily := range metricsFamilyMap {
if metricName == "pod_cpu_usage_seconds_total" {
for _, metric := range metricFamily.Metric {
if *metric.Label[1].Value == pod.Name {
found = true
}
}
}
if metricName == "container_cpu_usage_seconds_total" {
for _, metric := range metricFamily.Metric {
if *metric.Label[1].Value == pod.Name {
currentContainerStatsCount += 1
}
}
}
}
if !found {
t.Fatalf("Pod %s not found in metrics", pod.Name)
}
// Make sure that we've got stats for all the containers in the "nginx-" pod.
desiredContainerStatsCount := len(pod.Spec.Containers)
if currentContainerStatsCount != desiredContainerStatsCount {
t.Fatalf("expected stats for %d containers, got stats for %d containers", desiredContainerStatsCount, currentContainerStatsCount)
}
}
// TestPodLifecycleGracefulDelete creates a pod and verifies that the provider has been asked to create it.
// Then, it deletes the pods and verifies that the provider has been asked to delete it.
// These verifications are made using the /stats/summary endpoint of the virtual-kubelet, by checking for the presence or absence of the pods.
@@ -403,7 +471,7 @@ func (ts *EndToEndTestSuite) TestCreatePodWithMandatoryInexistentConfigMap(t *te
// It returns an error if the specified pod is not found.
func findPodInPodStats(summary *stats.Summary, pod *v1.Pod) (int, error) {
for i, p := range summary.Pods {
if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name && string(p.PodRef.UID) == string(pod.UID) {
if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name && p.PodRef.UID == string(pod.UID) {
return i, nil
}
}

View File

@@ -0,0 +1,266 @@
// Copyright © 2022 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package opentelemetry implements a github.com/virtual-kubelet/virtual-kubelet/trace.Tracer
// using openTelemetry as a backend.
//
// Use this by setting `trace.T = Adapter{}`
//
// For customizing trace provider used in Adapter, set trace provider by
// `otel.SetTracerProvider(*sdktrace.TracerProvider)`. Examples of customize are setting service name,
// use your own exporter (e.g. jaeger, otlp, prometheus, zipkin, and stdout) etc. Do not forget
// to call TracerProvider.Shutdown() when you create your TracerProvider to avoid memory leak.
package opentelemetry
import (
"context"
"fmt"
"sync"
"time"
"go.opentelemetry.io/otel/codes"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/trace"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
ot "go.opentelemetry.io/otel/trace"
)
type logLevel string
const (
lDebug logLevel = "DEBUG"
lInfo logLevel = "INFO"
lWarn logLevel = "WARN"
lErr logLevel = "ERROR"
lFatal logLevel = "FATAL"
)
// Adapter implements the trace.Tracer interface for openTelemetry
type Adapter struct{}
// StartSpan creates a new span from openTelemetry using the given name.
func (Adapter) StartSpan(ctx context.Context, name string) (context.Context, trace.Span) {
ctx, ots := otel.Tracer(name).Start(ctx, name)
l := log.G(ctx).WithField("method", name)
s := &span{s: ots, l: l}
ctx = log.WithLogger(ctx, s.Logger())
return ctx, s
}
type span struct {
mu sync.Mutex
s ot.Span
l log.Logger
}
func (s *span) End() {
s.s.End()
}
func (s *span) SetStatus(err error) {
if !s.s.IsRecording() {
return
}
if err == nil {
s.s.SetStatus(codes.Ok, "")
} else {
s.s.SetStatus(codes.Error, err.Error())
}
}
func (s *span) WithField(ctx context.Context, key string, val interface{}) context.Context {
s.mu.Lock()
s.l = s.l.WithField(key, val)
ctx = log.WithLogger(ctx, &logger{s: s.s, l: s.l})
s.mu.Unlock()
if s.s.IsRecording() {
s.s.SetAttributes(makeAttribute(key, val))
}
return ctx
}
func (s *span) WithFields(ctx context.Context, f log.Fields) context.Context {
s.mu.Lock()
s.l = s.l.WithFields(f)
ctx = log.WithLogger(ctx, &logger{s: s.s, l: s.l})
s.mu.Unlock()
if s.s.IsRecording() {
attrs := make([]attribute.KeyValue, 0, len(f))
for k, v := range f {
attrs = append(attrs, makeAttribute(k, v))
}
s.s.SetAttributes(attrs...)
}
return ctx
}
func (s *span) Logger() log.Logger {
return &logger{s: s.s, l: s.l}
}
type logger struct {
s ot.Span
l log.Logger
a []attribute.KeyValue
}
func (l *logger) Debug(args ...interface{}) {
l.logEvent(lDebug, args...)
}
func (l *logger) Debugf(f string, args ...interface{}) {
l.logEventf(lDebug, f, args...)
}
func (l *logger) Info(args ...interface{}) {
l.logEvent(lInfo, args...)
}
func (l *logger) Infof(f string, args ...interface{}) {
l.logEventf(lInfo, f, args...)
}
func (l *logger) Warn(args ...interface{}) {
l.logEvent(lWarn, args...)
}
func (l *logger) Warnf(f string, args ...interface{}) {
l.logEventf(lWarn, f, args...)
}
func (l *logger) Error(args ...interface{}) {
l.logEvent(lErr, args...)
}
func (l *logger) Errorf(f string, args ...interface{}) {
l.logEventf(lErr, f, args...)
}
func (l *logger) Fatal(args ...interface{}) {
l.logEvent(lFatal, args...)
}
func (l *logger) Fatalf(f string, args ...interface{}) {
l.logEventf(lFatal, f, args...)
}
func (l *logger) logEvent(ll logLevel, args ...interface{}) {
msg := fmt.Sprint(args...)
switch ll {
case lDebug:
l.l.Debug(msg)
case lInfo:
l.l.Info(msg)
case lWarn:
l.l.Warn(msg)
case lErr:
l.l.Error(msg)
case lFatal:
l.l.Fatal(msg)
}
if !l.s.IsRecording() {
return
}
l.s.AddEvent(msg, ot.WithTimestamp(time.Now()))
}
func (l *logger) logEventf(ll logLevel, f string, args ...interface{}) {
switch ll {
case lDebug:
l.l.Debugf(f, args...)
case lInfo:
l.l.Infof(f, args...)
case lWarn:
l.l.Warnf(f, args...)
case lErr:
l.l.Errorf(f, args...)
case lFatal:
l.l.Fatalf(f, args...)
}
if !l.s.IsRecording() {
return
}
msg := fmt.Sprintf(f, args...)
l.s.AddEvent(msg, ot.WithTimestamp(time.Now()))
}
func (l *logger) WithError(err error) log.Logger {
return l.WithField("err", err)
}
func (l *logger) WithField(k string, value interface{}) log.Logger {
var attrs []attribute.KeyValue
if l.s.IsRecording() {
attrs = make([]attribute.KeyValue, len(l.a)+1)
copy(attrs, l.a)
attrs[len(attrs)-1] = makeAttribute(k, value)
}
return &logger{s: l.s, a: attrs, l: l.l.WithField(k, value)}
}
func (l *logger) WithFields(fields log.Fields) log.Logger {
var attrs []attribute.KeyValue
if l.s.IsRecording() {
attrs = make([]attribute.KeyValue, len(l.a), len(l.a)+len(fields))
copy(attrs, l.a)
for k, v := range fields {
attrs = append(attrs, makeAttribute(k, v))
}
}
return &logger{s: l.s, a: attrs, l: l.l.WithFields(fields)}
}
func makeAttribute(key string, val interface{}) (attr attribute.KeyValue) {
switch v := val.(type) {
case string:
return attribute.String(key, v)
// case []string:
// return attribute.StringSlice(key, v)
case fmt.Stringer:
return attribute.Stringer(key, v)
case int:
return attribute.Int(key, v)
// case []int:
// return attribute.IntSlice(key, v)
case int64:
return attribute.Int64(key, v)
case float64:
return attribute.Float64(key, v)
// case []float64:
// return attribute.Float64Slice(key, v)
// case []int64:
// return attribute.Int64Slice(key, v)
case bool:
return attribute.Bool(key, v)
// case []bool:
// return attribute.BoolSlice(key, v)
case error:
if v == nil {
attribute.String(key, "")
}
return attribute.String(key, v.Error())
default:
return attribute.String(key, fmt.Sprintf("%+v", val))
}
}

View File

@@ -0,0 +1,660 @@
// Copyright © 2022 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package opentelemetry
import (
"context"
"fmt"
"sort"
"strconv"
"sync"
"testing"
"time"
"github.com/pkg/errors"
"github.com/virtual-kubelet/virtual-kubelet/log"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv"
"go.opentelemetry.io/otel/trace"
"gotest.tools/assert"
"gotest.tools/assert/cmp"
)
func TestStartSpan(t *testing.T) {
t.Run("addField", func(t *testing.T) {
tearDown, p, _ := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
a := Adapter{}
_, s := a.StartSpan(ctx, "name")
s.End()
})
}
func TestSetStatus(t *testing.T) {
testCases := []struct {
description string
spanName string
inputStatus error
expectedCode codes.Code
expectedDescription string
}{
{
description: "error status",
spanName: "test",
inputStatus: errors.New("fake msg"),
expectedCode: codes.Error,
expectedDescription: "fake msg",
}, {
description: "non-error status",
spanName: "test",
inputStatus: nil,
expectedCode: codes.Ok,
expectedDescription: "",
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
l := log.G(ctx).WithField("method", tt.spanName)
s := &span{s: ots, l: l}
s.SetStatus(tt.inputStatus)
assert.Assert(t, s.s.IsRecording())
s.End()
assert.Assert(t, !s.s.IsRecording())
assert.Assert(t, e.status == tt.expectedCode)
assert.Assert(t, e.statusMessage == tt.expectedDescription)
s.SetStatus(tt.inputStatus) // should not be panic even if span is ended.
})
}
}
func TestWithField(t *testing.T) {
type field struct {
key string
value interface{}
}
testCases := []struct {
description string
spanName string
fields []field
expectedAttributes []attribute.KeyValue
}{
{
description: "single field",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "multiple unique fields",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}, {key: "testKey2", value: "value2"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
}, {
description: "duplicated fields",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}, {key: "testKey1", value: "value2"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value2")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
l := log.G(ctx).WithField("method", tt.spanName)
s := &span{s: ots, l: l}
for _, f := range tt.fields {
ctx = s.WithField(ctx, f.key, f.value)
}
s.End()
assert.Assert(t, len(e.attributes) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
assert.Check(t, cmp.Contains(e.attributes, a))
}
})
}
}
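
// TestWithFields verifies that span.WithFields records every entry of a log.Fields map as a span attribute.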
func TestWithFields(t *testing.T) {
testCases := []struct {
description string
spanName string
fields log.Fields
expectedAttributes []attribute.KeyValue
}{
{
description: "single field",
spanName: "test",
fields: log.Fields{"testKey1": "value1"},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "multiple unique fields",
spanName: "test",
fields: log.Fields{"testKey1": "value1", "testKey2": "value2"},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
ctx, ots := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
l := log.G(ctx).WithField("method", tt.spanName)
s := &span{s: ots, l: l}
_ = s.WithFields(ctx, tt.fields)
s.End()
assert.Assert(t, len(e.attributes) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
assert.Check(t, cmp.Contains(e.attributes, a))
}
})
}
}
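
// TestLog verifies that logging at each level through the span-backed logger emits a span event
// carrying the message and forwards the WithFields attributes to the underlying logger.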
func TestLog(t *testing.T) {
testCases := []struct {
description string
spanName string
logLevel logLevel
fields log.Fields
msg string
expectedEvents []trace.Event
expectedAttributes []attribute.KeyValue
}{
{
description: "debug",
spanName: "test",
logLevel: lDebug,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []trace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "info",
spanName: "test",
logLevel: lInfo,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []trace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "warn",
spanName: "test",
logLevel: lWarn,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []trace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "error",
spanName: "test",
logLevel: lErr,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []trace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "fatal",
spanName: "test",
logLevel: lFatal,
fields: log.Fields{"testKey1": "value1"},
msg: "message",
expectedEvents: []trace.Event{{Name: "message"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
switch tt.logLevel {
case lDebug:
l.WithFields(tt.fields).Debug(tt.msg)
case lInfo:
l.WithFields(tt.fields).Info(tt.msg)
case lWarn:
l.WithFields(tt.fields).Warn(tt.msg)
case lErr:
l.WithFields(tt.fields).Error(tt.msg)
case lFatal:
l.WithFields(tt.fields).Fatal(tt.msg)
}
s.End()
assert.Assert(t, len(e.events) == len(tt.expectedEvents))
for i, event := range tt.expectedEvents {
assert.Assert(t, e.events[i].Name == event.Name)
assert.Assert(t, !e.events[i].Time.IsZero())
}
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
assert.Check(t, cmp.Contains(fl.a, a))
}
})
}
}
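
// TestLogf exercises the formatted logging variants, checking the rendered event names and that
// the attribute keys and value types recorded on the underlying logger match expectations.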
func TestLogf(t *testing.T) {
testCases := []struct {
description string
spanName string
logLevel logLevel
msg string
fields log.Fields
args []interface{}
expectedEvents []trace.Event
expectedAttributes []attribute.KeyValue
}{
{
description: "debug",
spanName: "test",
logLevel: lDebug,
msg: "k1: %s, k2: %v, k3: %d, k4: %v",
fields: map[string]interface{}{"k1": "test", "k2": []string{"test"}, "k3": 1, "k4": []int{1}},
args: []interface{}{"test", []string{"test"}, int(1), []int{1}},
expectedEvents: []trace.Event{{Name: "k1: test, k2: [test], k3: 1, k4: [1]"}},
expectedAttributes: []attribute.KeyValue{
attribute.String("k1", "test"),
attribute.String("k2", fmt.Sprintf("%+v", []string{"test"})),
attribute.Int("k3", 1),
attribute.String("k4", fmt.Sprintf("%+v", []int{1})),
},
}, {
description: "info",
spanName: "test",
logLevel: lInfo,
msg: "k1: %d, k2: %v, k3: %f, k4: %v",
fields: map[string]interface{}{"k1": int64(3), "k2": []int64{4}, "k3": float64(2), "k4": []float64{4}},
args: []interface{}{int64(3), []int64{4}, float64(2), []float64{4}},
expectedEvents: []trace.Event{{Name: "k1: 3, k2: [4], k3: 2.000000, k4: [4]"}},
expectedAttributes: []attribute.KeyValue{
attribute.Int64("k1", 1),
attribute.String("k2", fmt.Sprintf("%+v", []int64{2})),
attribute.Float64("k3", 3),
attribute.String("k4", fmt.Sprintf("%+v", []float64{4})),
},
}, {
description: "warn",
spanName: "test",
logLevel: lWarn,
msg: "k1: %v, k2: %v",
fields: map[string]interface{}{"k1": map[int]int{1: 1}, "k2": num(1)},
args: []interface{}{map[int]int{1: 1}, num(1)},
expectedEvents: []trace.Event{{Name: "k1: map[1:1], k2: 1"}},
expectedAttributes: []attribute.KeyValue{
attribute.String("k1", "{1:1}"),
attribute.Stringer("k2", num(1)),
},
}, {
description: "error",
spanName: "test",
logLevel: lErr,
msg: "k1: %t, k2: %v, k3: %s",
fields: map[string]interface{}{"k1": true, "k2": []bool{true}, "k3": errors.New("fake")},
args: []interface{}{true, []bool{true}, errors.New("fake")},
expectedEvents: []trace.Event{{Name: "k1: true, k2: [true], k3: fake"}},
expectedAttributes: []attribute.KeyValue{
attribute.Bool("k1", true),
attribute.String("k2", fmt.Sprintf("%+v", []bool{true})),
attribute.String("k3", "fake"),
},
}, {
description: "fatal",
spanName: "test",
logLevel: lFatal,
expectedEvents: []trace.Event{{Name: ""}},
expectedAttributes: []attribute.KeyValue{},
},
}
for _, tt := range testCases {
tt := tt
t.Run(tt.description, func(t *testing.T) {
tearDown, p, e := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
switch tt.logLevel {
case lDebug:
l.WithFields(tt.fields).Debugf(tt.msg, tt.args...)
case lInfo:
l.WithFields(tt.fields).Infof(tt.msg, tt.args...)
case lWarn:
l.WithFields(tt.fields).Warnf(tt.msg, tt.args...)
case lErr:
l.WithFields(tt.fields).Errorf(tt.msg, tt.args...)
case lFatal:
l.WithFields(tt.fields).Fatalf(tt.msg, tt.args...)
}
s.End()
assert.Assert(t, len(e.events) == len(tt.expectedEvents))
for i, event := range tt.expectedEvents {
event := event
i := i
t.Run(fmt.Sprintf("event %s", event.Name), func(t *testing.T) {
assert.Check(t, cmp.Equal(e.events[i].Name, event.Name))
assert.Check(t, !e.events[i].Time.IsZero())
})
}
assert.Assert(t, cmp.Len(fl.a, len(tt.expectedAttributes)))
sort.Slice(tt.expectedAttributes, func(i, j int) bool {
return tt.expectedAttributes[i].Key < tt.expectedAttributes[j].Key
})
sort.Slice(fl.a, func(i, j int) bool {
return fl.a[i].Key < fl.a[j].Key
})
for i, a := range tt.expectedAttributes {
a := a
t.Run(fmt.Sprintf("attribute %s", a.Key), func(t *testing.T) {
assert.Assert(t, fl.a[i].Key == a.Key)
assert.Assert(t, cmp.Equal(fl.a[i].Value.Type(), a.Value.Type()))
// TODO: check value, this is harder to do since the types are unknown
})
}
l.Debugf(tt.msg, tt.args...) // should not panic even after the span has ended
})
}
}
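
// TestLogWithField verifies that chaining WithField before logging records one attribute per call
// on the underlying logger, including repeated keys.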
func TestLogWithField(t *testing.T) {
type field struct {
key string
value interface{}
}
testCases := []struct {
description string
spanName string
fields []field
expectedAttributes []attribute.KeyValue
}{
{
description: "single field",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "multiple unique fields",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}, {key: "testKey2", value: "value2"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
}, {
description: "duplicated fields",
spanName: "test",
fields: []field{{key: "testKey1", value: "value1"}, {key: "testKey1", value: "value2"}},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, _ := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
for _, f := range tt.fields {
l.WithField(f.key, f.value).Info("")
}
s.End()
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
assert.Check(t, cmp.Contains(fl.a, a))
}
l.Debug("") // should not panic even if span is finished
})
}
}
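
// TestLogWithError verifies that WithError records an "err" attribute carrying the error message,
// or an empty string when the error is nil.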
func TestLogWithError(t *testing.T) {
testCases := []struct {
description string
spanName string
err error
expectedAttributes []attribute.KeyValue
}{
{
description: "normal",
spanName: "test",
err: errors.New("fake"),
expectedAttributes: []attribute.KeyValue{{Key: "err", Value: attribute.StringValue("fake")}},
}, {
description: "nil error",
spanName: "test",
err: nil,
expectedAttributes: []attribute.KeyValue{{Key: "err", Value: attribute.StringValue("")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, _ := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
l.WithError(tt.err).Error("")
s.End()
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
assert.Check(t, cmp.Contains(fl.a, a))
}
})
}
}
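
// TestLogWithFields verifies that WithFields records every map entry as an attribute on the underlying logger.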
func TestLogWithFields(t *testing.T) {
testCases := []struct {
description string
spanName string
fields log.Fields
expectedAttributes []attribute.KeyValue
}{
{
description: "single field",
spanName: "test",
fields: log.Fields{"testKey1": "value1"},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}},
}, {
description: "multiple unique fields",
spanName: "test",
fields: log.Fields{"testKey1": "value1", "testKey2": "value2"},
expectedAttributes: []attribute.KeyValue{{Key: "testKey1", Value: attribute.StringValue("value1")}, {Key: "testKey2", Value: attribute.StringValue("value2")}},
},
}
for _, tt := range testCases {
t.Run(tt.description, func(t *testing.T) {
tearDown, p, _ := setupSuite()
defer tearDown(p)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
_, s := otel.Tracer(tt.spanName).Start(ctx, tt.spanName)
fl := &fakeLogger{}
l := logger{s: s, l: fl, a: make([]attribute.KeyValue, 0)}
l.WithFields(tt.fields).Debug("")
s.End()
assert.Assert(t, len(fl.a) == len(tt.expectedAttributes))
for _, a := range tt.expectedAttributes {
assert.Check(t, cmp.Contains(fl.a, a))
}
})
}
}
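
// setupSuite installs a global TracerProvider backed by a synchronous fakeExporter and an
// always-on sampler, and returns a teardown function together with the provider and the exporter.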
func setupSuite() (func(provider *sdktrace.TracerProvider), *sdktrace.TracerProvider, *fakeExporter) {
r := NewResource("virtual-kubelet", "1.2.3")
e := &fakeExporter{}
p := sdktrace.NewTracerProvider(
sdktrace.WithSyncer(e),
sdktrace.WithResource(r),
sdktrace.WithSampler(sdktrace.AlwaysSample()),
)
otel.SetTracerProvider(p)
// Return a teardown function that shuts down the provider, along with the provider and the fake exporter.
return func(provider *sdktrace.TracerProvider) {
_ = p.Shutdown(context.Background())
}, p, e
}
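
// NewResource builds an OpenTelemetry resource describing the service name and version.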
func NewResource(name, version string) *resource.Resource {
return resource.NewWithAttributes(
semconv.ServiceNameKey.String(name),
semconv.ServiceVersionKey.String(version),
)
}
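
// fakeExporter is an in-memory span exporter that captures the attributes, links, events, and
// status of the spans it receives so tests can assert on them.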
type fakeExporter struct {
sync.Mutex
// attributes holds the attributes recorded on the exported spans.
attributes []attribute.KeyValue
// links holds the links the exported spans have to other spans.
links []trace.Link
// events holds the events that occurred within the exported spans' lifetimes.
events []trace.Event
// status and statusMessage hold the status of the last exported span.
status codes.Code
statusMessage string
}
func (f *fakeExporter) ExportSpans(_ context.Context, spans []*sdktrace.SpanSnapshot) error {
f.Lock()
defer f.Unlock()
f.attributes = make([]attribute.KeyValue, 0)
f.links = make([]trace.Link, 0)
f.events = make([]trace.Event, 0)
for _, s := range spans {
f.attributes = append(f.attributes, s.Attributes...)
f.links = append(f.links, s.Links...)
f.events = append(f.events, s.MessageEvents...)
f.status = s.StatusCode
f.statusMessage = s.StatusMessage
}
return nil
}
func (f *fakeExporter) Shutdown(_ context.Context) (err error) {
f.attributes = make([]attribute.KeyValue, 0)
f.links = make([]trace.Link, 0)
f.events = make([]trace.Event, 0)
return
}
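
// fakeLogger is a log.Logger stub that discards messages and records the attributes derived from
// WithField, WithFields, and WithError calls.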
type fakeLogger struct {
a []attribute.KeyValue
}
func (*fakeLogger) Debug(...interface{}) {}
func (*fakeLogger) Debugf(string, ...interface{}) {}
func (*fakeLogger) Info(...interface{}) {}
func (*fakeLogger) Infof(string, ...interface{}) {}
func (*fakeLogger) Warn(...interface{}) {}
func (*fakeLogger) Warnf(string, ...interface{}) {}
func (*fakeLogger) Error(...interface{}) {}
func (*fakeLogger) Errorf(string, ...interface{}) {}
func (*fakeLogger) Fatal(...interface{}) {}
func (*fakeLogger) Fatalf(string, ...interface{}) {}
func (l *fakeLogger) WithField(k string, v interface{}) log.Logger {
l.a = append(l.a, makeAttribute(k, v))
return l
}
func (l *fakeLogger) WithFields(fs log.Fields) log.Logger {
for k, v := range fs {
l.a = append(l.a, makeAttribute(k, v))
}
return l
}
func (l *fakeLogger) WithError(err error) log.Logger {
l.a = append(l.a, makeAttribute("err", err))
return l
}
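
// num is an integer type implementing fmt.Stringer, used to exercise fmt.Stringer handling when
// values are converted to attributes.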
type num int
func (i num) String() string {
return strconv.Itoa(int(i))
}