Compare commits
15 Commits
| Author | SHA1 | Date |
|---|---|---|
| | d3ff785e08 | |
| | 8493cbb42a | |
| | 7bd0bd0f0e | |
| | 10126924f9 | |
| | afaae4454f | |
| | f7fee27790 | |
| | 327c6cf319 | |
| | 1c4aa5d575 | |
| | ab3086436e | |
| | ad943ebbd8 | |
| | 3371c727a6 | |
| | 1bab3ea80f | |
| | 3a88dfd413 | |
| | 5854904f0d | |
| | 905709c646 | |
@@ -33,21 +33,9 @@ jobs:
       - run:
           name: Build
           command: V=1 make build
-      - run:
-          name: Install Nomad
-          command: |
-            curl \
-              --silent \
-              --location \
-              --output nomad.zip \
-              https://releases.hashicorp.com/nomad/0.8.6/nomad_0.8.6_linux_amd64.zip && \
-            unzip nomad.zip && \
-            chmod +x nomad && \
-            mv nomad /go/bin/nomad && \
-            rm nomad.zip
       - run:
           name: Tests
-          command: V=1 CI=1 SKIP_AWS_E2E=1 make test
+          command: V=1 CI=1 make test

   e2e:
     machine:
@@ -57,12 +45,12 @@ jobs:
       CHANGE_MINIKUBE_NONE_USER: true
       GOPATH: /home/circleci/go
       KUBECONFIG: /home/circleci/.kube/config
-      KUBERNETES_VERSION: v1.12.3
+      KUBERNETES_VERSION: v1.13.7
       MINIKUBE_HOME: /home/circleci
-      MINIKUBE_VERSION: v0.30.0
+      MINIKUBE_VERSION: v1.2.0
       MINIKUBE_WANTUPDATENOTIFICATION: false
       MINIKUBE_WANTREPORTERRORPROMPT: false
-      SKAFFOLD_VERSION: v0.18.0
+      SKAFFOLD_VERSION: v0.33.0
       GO111MODULE: "on"
     steps:
       - checkout
@@ -77,13 +65,13 @@ jobs:
       - run:
           name: Install Skaffold
           command: |
-            curl -Lo skaffold https://github.com/GoogleContainerTools/skaffold/releases/download/${SKAFFOLD_VERSION}/skaffold-linux-amd64
+            curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/${SKAFFOLD_VERSION}/skaffold-linux-amd64
            chmod +x skaffold
            sudo mv skaffold /usr/local/bin/
      - run:
          name: Install Minikube
          command: |
-            curl -Lo minikube https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-amd64
+            curl -Lo minikube https://storage.googleapis.com/minikube/releases/${MINIKUBE_VERSION}/minikube-linux-amd64
            chmod +x minikube
            sudo mv minikube /usr/local/bin/
      - run:
@@ -110,7 +98,7 @@ jobs:
          command: |
            mkdir $HOME/.go
            export PATH=$HOME/.go/bin:${PATH}
-            curl -fsSL -o "/tmp/go.tar.gz" "https://dl.google.com/go/go1.12.5.linux-amd64.tar.gz"
+            curl -fsSL -o "/tmp/go.tar.gz" "https://dl.google.com/go/go1.12.6.linux-amd64.tar.gz"
            tar -C $HOME/.go --strip-components=1 -xzf "/tmp/go.tar.gz"
            go version
            make e2e
Makefile.e2e (10 changes)
@@ -1,7 +1,7 @@
 .PHONY: skaffold.validate
 skaffold.validate: kubectl_context := $(shell kubectl config current-context)
 skaffold.validate:
-	if [[ ! "minikube,docker-for-desktop,docker-desktop" =~ .*"$(kubectl_context)".* ]]; then \
+	@if [[ ! "minikube,docker-for-desktop,docker-desktop" =~ .*"$(kubectl_context)".* ]]; then \
 		echo current-context is [$(kubectl_context)]. Must be one of [minikube,docker-for-desktop,docker-desktop]; \
 		false; \
 	fi
@@ -11,7 +11,8 @@ skaffold.validate:
 # MODE must be set to one of "dev" (default), "delete" or "run", and is used as the skaffold command to be run.
 .PHONY: skaffold
 skaffold: MODE ?= dev
-skaffold: skaffold/$(MODE)
+.SECONDEXPANSION:
+skaffold: skaffold/$$(MODE)

 .PHONY: skaffold/%
 skaffold/%: PROFILE := local
@@ -20,6 +21,8 @@ skaffold/%: skaffold.validate
 	-f $(PWD)/hack/skaffold/virtual-kubelet/skaffold.yml \
 	-p $(PROFILE)

+skaffold/run skaffold/dev: bin/e2e/virtual-kubelet
+
 bin/e2e:
 	@mkdir -p bin/e2e

@@ -39,7 +42,8 @@ e2e: e2e.clean bin/e2e/virtual-kubelet skaffold/run
 	cd $(PWD)/internal/test/e2e && go test -v -timeout 5m -tags e2e ./... \
 		-kubeconfig=$(KUBECONFIG) \
 		-namespace=$(NAMESPACE) \
-		-node-name=$(NODE_NAME) \
+		-node-name=$(NODE_NAME)
+	@$(MAKE) e2e.clean

 .PHONY: e2e.clean
 e2e.clean: NODE_NAME ?= vkubelet-mock-0
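The `e2e` target runs `go test` with `-tags e2e`, so the end-to-end tests only compile when that tag is passed. A minimal sketch of such a build-tagged test file (the package, test name, and body are illustrative, not taken from this change):

```go
// +build e2e

package e2e

import "testing"

// TestNodeRegistered only compiles under `go test -tags e2e`, matching the
// Makefile target above; the kubeconfig, namespace, and node-name flags are
// read by the test harness.
func TestNodeRegistered(t *testing.T) {
	// ... exercise the cluster against the deployed mock node ...
}
```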
README.md (231 changes)
@@ -9,9 +9,6 @@ Virtual Kubelet features a pluggable architecture and direct use of Kubernetes p
 We invite the Kubernetes ecosystem to join us in empowering developers to build
 upon our base. Join our slack channel named, virtual-kubelet, within the [Kubernetes slack group](https://kubernetes.slack.com/).

-Please note this software is experimental and should not be used for anything
-resembling a production workload.
-
 The best description is "Kubernetes API on top, programmable back."

 #### Table of Contents
@@ -40,43 +37,14 @@ The diagram below illustrates how Virtual-Kubelet works.

 ## Usage

-Deploy a Kubernetes cluster and make sure it's reachable.
+Virtual Kubelet is focused on providing a library that you can consume in your
+project to build a custom Kubernetes node agent.

-### Outside the Kubernetes cluster
+See godoc for up-to-date instructions on consuming this project:
+https://godoc.org/github.com/virtual-kubelet/virtual-kubelet

-Run the binary with your chosen provider:
-
-```bash
-./bin/virtual-kubelet --provider <your provider>
-```
-
-Now that the virtual-kubelet is deployed run `kubectl get nodes` and you should see
-a `virtual-kubelet` node.
-
-### Inside the Kubernetes cluster (Minikube or Docker for Desktop)
-
-It is possible to run the Virtual Kubelet as a Kubernetes pod inside a Minikube or Docker for Desktop cluster.
-As of this writing, automation of this deployment is supported only for the mock provider, and is primarily intended at testing.
-In order to deploy the Virtual Kubelet, you need to [install `skaffold`](https://github.com/GoogleContainerTools/skaffold#installation).
-You also need to make sure that your current context is either `minikube` or `docker-for-desktop`.
-
-In order to deploy the Virtual Kubelet, run the following command after the prerequisites have been met:
-
-```console
-$ make skaffold
-```
-
-By default, this will run `skaffold` in [_development_ mode](https://github.com/GoogleContainerTools/skaffold#skaffold-dev).
-This will make `skaffold` watch `hack/skaffold/virtual-kubelet/Dockerfile` and its dependencies for changes and re-deploy the Virtual Kubelet when said changes happen.
-It will also make `skaffold` stream logs from the Virtual Kubelet pod.
-
-As an alternative, and if you are not concerned about continuous deployment and log streaming, you can run the following command instead:
-
-```console
-$ make skaffold MODE=run
-```
-
-This will build and deploy the Virtual Kubelet, and return.
+There are implementations available for several providers (listed above); see
+those repos for details on how to deploy.

 ## Current Features
@@ -89,34 +57,6 @@ This will build and deploy the Virtual Kubelet, and return.
 - bring your own virtual network


-## Command-Line Usage
-
-```bash
-virtual-kubelet implements the Kubelet interface with a pluggable
-backend implementation allowing users to create kubernetes nodes without running the kubelet.
-This allows users to schedule kubernetes workloads on nodes that aren't running Kubernetes.
-
-Usage:
-  virtual-kubelet [flags]
-  virtual-kubelet [command]
-
-Available Commands:
-  help        Help about any command
-  version     Show the version of the program
-
-Flags:
-  -h, --help                     help for virtual-kubelet
-      --kubeconfig string        config file (default is $HOME/.kube/config)
-      --namespace string         kubernetes namespace (default is 'all')
-      --nodename string          kubernetes node name (default "virtual-kubelet")
-      --os string                Operating System (Linux/Windows) (default "Linux")
-      --provider string          cloud provider
-      --provider-config string   cloud provider configuration file
-      --taint string             apply taint to node, making scheduling explicit
-
-Use "virtual-kubelet [command] --help" for more information about a command.
-```
-
 ## Providers

 This project features a pluggable provider interface developers can implement
@@ -184,10 +124,6 @@ using the provider, pods that are scheduled on the virtual Nomad node
 registered on Kubernetes will run as jobs on Nomad clients as they
 would on a Kubernetes node.

-```bash
-./bin/virtual-kubelet --provider="nomad"
-```
-
 For detailed instructions, follow the guide [here](https://github.com/virtual-kubelet/nomad/blob/master/README.md).

 ### OpenStack Zun Provider
@@ -207,69 +143,114 @@ For detailed instructions, follow the guide [here](https://github.com/virtual-ku

 ### Adding a New Provider via the Provider Interface

-The structure we chose allows you to have all the power of the Kubernetes API
-on top with a pluggable interface.
+Providers consume this project as a library which implements the core logic of
+a Kubernetes node agent (Kubelet), and wire up their implementation for
+performing the necessary actions.

-Create a new directory for your provider under `providers` and implement the
-following interface. Then add register your provider in
-`providers/register/<provider_name>_provider.go`. Make sure to add a build tag so that
-your provider can be excluded from being built. The format for this build tag
-should be `no_<provider_name>_provider`. Also make sure your provider has all
-necessary platform build tags, e.g. "linux" if your provider only compiles on Linux.
+There are 3 main interfaces:
+
+#### PodLifecycleHandler
+
+When pods are created, updated, or deleted from Kubernetes, these methods are
+called to handle those actions.
+
+[godoc#PodLifecycleHandler](https://godoc.org/github.com/virtual-kubelet/virtual-kubelet/node#PodLifecycleHandler)

 ```go
-// Provider contains the methods required to implement a virtual-kubelet provider.
-type Provider interface {
-	// CreatePod takes a Kubernetes Pod and deploys it within the provider.
-	CreatePod(ctx context.Context, pod *v1.Pod) error
+type PodLifecycleHandler interface {
+	// CreatePod takes a Kubernetes Pod and deploys it within the provider.
+	CreatePod(ctx context.Context, pod *corev1.Pod) error

-	// UpdatePod takes a Kubernetes Pod and updates it within the provider.
-	UpdatePod(ctx context.Context, pod *v1.Pod) error
+	// UpdatePod takes a Kubernetes Pod and updates it within the provider.
+	UpdatePod(ctx context.Context, pod *corev1.Pod) error

-	// DeletePod takes a Kubernetes Pod and deletes it from the provider.
-	DeletePod(ctx context.Context, pod *v1.Pod) error
+	// DeletePod takes a Kubernetes Pod and deletes it from the provider.
+	DeletePod(ctx context.Context, pod *corev1.Pod) error

-	// GetPod retrieves a pod by name from the provider (can be cached).
-	GetPod(ctx context.Context, namespace, name string) (*v1.Pod, error)
+	// GetPod retrieves a pod by name from the provider (can be cached).
+	GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error)

-	// GetContainerLogs retrieves the logs of a container by name from the provider.
-	GetContainerLogs(ctx context.Context, namespace, podName, containerName string, tail int) (string, error)
+	// GetPodStatus retrieves the status of a pod by name from the provider.
+	GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error)

-	// ExecInContainer executes a command in a container in the pod, copying data
-	// between in/out/err and the container's stdin/stdout/stderr.
-	ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error
-
-	// GetPodStatus retrieves the status of a pod by name from the provider.
-	GetPodStatus(ctx context.Context, namespace, name string) (*v1.PodStatus, error)
-
-	// GetPods retrieves a list of all pods running on the provider (can be cached).
-	GetPods(context.Context) ([]*v1.Pod, error)
-
-	// Capacity returns a resource list with the capacity constraints of the provider.
-	Capacity(context.Context) v1.ResourceList
-
-	// NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), which is
-	// polled periodically to update the node status within Kubernetes.
-	NodeConditions(context.Context) []v1.NodeCondition
-
-	// NodeAddresses returns a list of addresses for the node status
-	// within Kubernetes.
-	NodeAddresses(context.Context) []v1.NodeAddress
-
-	// NodeDaemonEndpoints returns NodeDaemonEndpoints for the node status
-	// within Kubernetes.
-	NodeDaemonEndpoints(context.Context) *v1.NodeDaemonEndpoints
-
-	// OperatingSystem returns the operating system the provider is for.
-	OperatingSystem() string
-}
-
-// PodMetricsProvider is an optional interface that providers can implement to expose pod stats
-type PodMetricsProvider interface {
-	GetStatsSummary(context.Context) (*stats.Summary, error)
+	// GetPods retrieves a list of all pods running on the provider (can be cached).
+	GetPods(context.Context) ([]*corev1.Pod, error)
 }
 ```
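To make the new `PodLifecycleHandler` shape concrete, here is a minimal in-memory sketch. The interface and method signatures come from the block above; the `InMemoryProvider` name, the map-backed storage, and the nil-pod return for unknown pods are illustrative assumptions, not part of this change:

```go
package mock

import (
	"context"
	"sync"

	corev1 "k8s.io/api/core/v1"
)

// InMemoryProvider is an illustrative PodLifecycleHandler that keeps pods in a map.
type InMemoryProvider struct {
	mu   sync.Mutex
	pods map[string]*corev1.Pod
}

func NewInMemoryProvider() *InMemoryProvider {
	return &InMemoryProvider{pods: make(map[string]*corev1.Pod)}
}

func podKey(namespace, name string) string { return namespace + "/" + name }

// CreatePod records the pod, standing in for a real backend deployment.
func (p *InMemoryProvider) CreatePod(ctx context.Context, pod *corev1.Pod) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.pods[podKey(pod.Namespace, pod.Name)] = pod
	return nil
}

// UpdatePod replaces the stored copy of the pod.
func (p *InMemoryProvider) UpdatePod(ctx context.Context, pod *corev1.Pod) error {
	return p.CreatePod(ctx, pod)
}

// DeletePod removes the pod from the store.
func (p *InMemoryProvider) DeletePod(ctx context.Context, pod *corev1.Pod) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	delete(p.pods, podKey(pod.Namespace, pod.Name))
	return nil
}

// GetPod returns the stored pod, or nil if the provider does not know it.
func (p *InMemoryProvider) GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.pods[podKey(namespace, name)], nil
}

// GetPodStatus returns the status recorded on the stored pod.
func (p *InMemoryProvider) GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error) {
	pod, err := p.GetPod(ctx, namespace, name)
	if err != nil || pod == nil {
		return nil, err
	}
	return &pod.Status, nil
}

// GetPods lists every pod the provider knows about.
func (p *InMemoryProvider) GetPods(ctx context.Context) ([]*corev1.Pod, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	pods := make([]*corev1.Pod, 0, len(p.pods))
	for _, pod := range p.pods {
		pods = append(pods, pod)
	}
	return pods, nil
}
```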
+There is also an optional interface `PodNotifier` which enables the provider to
+asynchronously notify the virtual-kubelet about pod status changes. If this
+interface is not implemented, virtual-kubelet will periodically check the status
+of all pods.
+
+It is highly recommended to implement `PodNotifier`, especially if you plan
+to run a large number of pods.
+
+[godoc#PodNotifier](https://godoc.org/github.com/virtual-kubelet/virtual-kubelet/node#PodNotifier)
+
+```go
+type PodNotifier interface {
+	// NotifyPods instructs the notifier to call the passed in function when
+	// the pod status changes.
+	//
+	// NotifyPods should not block callers.
+	NotifyPods(context.Context, func(*corev1.Pod))
+}
+```
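For illustration, a provider could wire the callback up as below; the `notifyingProvider` type and `markSucceeded` helper are hypothetical, and a real implementation would also guard the saved callback against concurrent access:

```go
// notifyingProvider pushes pod status changes instead of waiting for the
// periodic status check.
type notifyingProvider struct {
	notify func(*corev1.Pod) // callback handed over by virtual-kubelet at startup
}

// NotifyPods saves the callback for later use.
func (p *notifyingProvider) NotifyPods(ctx context.Context, cb func(*corev1.Pod)) {
	p.notify = cb
}

// markSucceeded shows how a backend event would be surfaced immediately.
func (p *notifyingProvider) markSucceeded(pod *corev1.Pod) {
	pod.Status.Phase = corev1.PodSucceeded
	p.notify(pod) // must not block callers
}
```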
+
+`PodLifecycleHandler` is consumed by the `PodController`, which is the core
+logic for managing pods assigned to the node.
+
+```go
+pc, _ := node.NewPodController(podControllerConfig) // <-- instantiates the pod controller
+pc.Run(ctx) // <-- starts watching for pods to be scheduled on the node
+```
+
+#### NodeProvider
+
+NodeProvider is responsible for notifying the virtual-kubelet about node status
+updates. Virtual-Kubelet will periodically check the status of the node and
+update Kubernetes accordingly.
+
+[godoc#NodeProvider](https://godoc.org/github.com/virtual-kubelet/virtual-kubelet/node#NodeProvider)
+
+```go
+type NodeProvider interface {
+	// Ping checks if the node is still active.
+	// This is intended to be lightweight as it will be called periodically as a
+	// heartbeat to keep the node marked as ready in Kubernetes.
+	Ping(context.Context) error
+
+	// NotifyNodeStatus is used to asynchronously monitor the node.
+	// The passed in callback should be called any time there is a change to the
+	// node's status.
+	// This will generally trigger a call to the Kubernetes API server to update
+	// the status.
+	//
+	// NotifyNodeStatus should not block callers.
+	NotifyNodeStatus(ctx context.Context, cb func(*corev1.Node))
+}
+```
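As a point of reference, the smallest useful implementation looks roughly like the sketch below, in the spirit of the `NaiveNodeProvider` mentioned next (the `staticNodeProvider` name is illustrative):

```go
// staticNodeProvider describes a node whose status never changes after creation.
type staticNodeProvider struct{}

// Ping always reports the node as healthy; a real provider would check its backend.
func (staticNodeProvider) Ping(ctx context.Context) error { return nil }

// NotifyNodeStatus never invokes cb, so the node object is only refreshed by
// the controller's periodic status updates.
func (staticNodeProvider) NotifyNodeStatus(ctx context.Context, cb func(*corev1.Node)) {}
```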
+
+Virtual Kubelet provides a `NaiveNodeProvider` that you can use if you do not
+plan to have custom node behavior.
+
+[godoc#NaiveNodeProvider](https://godoc.org/github.com/virtual-kubelet/virtual-kubelet/node#NaiveNodeProvider)
+
+`NodeProvider` gets consumed by the `NodeController`, which is the core logic for
+managing the node object in Kubernetes.
+
+```go
+nc, _ := node.NewNodeController(nodeProvider, nodeSpec) // <-- instantiate a node controller from a node provider and a kubernetes node spec
+nc.Run(ctx) // <-- creates the node in kubernetes and starts up the controller
+```
+
+#### API endpoints
+
+One of the roles of a Kubelet is to accept requests from the API server for
+things like `kubectl logs` and `kubectl exec`. Helpers for setting this up are
+provided [here](https://godoc.org/github.com/virtual-kubelet/virtual-kubelet/node/api).

 ## Testing

 ### Unit tests
@@ -316,6 +297,12 @@ In order to do so, you should run:
 $ kubectl delete node vkubelet-mock-0
 ```

+To clean up all resources, you can run:
+
+```console
+$ make e2e.clean
+```
+
 ## Known quirks and workarounds

 ### Missing Load Balancer IP addresses for services
@@ -19,12 +19,12 @@ import (
 	"os"

 	"github.com/spf13/cobra"
-	"github.com/virtual-kubelet/virtual-kubelet/providers"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/provider"
 )

 // NewCommand creates a new providers subcommand
 // This subcommand is used to determine which providers are registered.
-func NewCommand(s *providers.Store) *cobra.Command {
+func NewCommand(s *provider.Store) *cobra.Command {
 	return &cobra.Command{
 		Use:   "providers",
 		Short: "Show the list of supported providers",
@@ -24,9 +24,9 @@ import (
 	"os"

 	"github.com/pkg/errors"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/provider"
 	"github.com/virtual-kubelet/virtual-kubelet/log"
 	"github.com/virtual-kubelet/virtual-kubelet/node/api"
-	"github.com/virtual-kubelet/virtual-kubelet/providers"
 )

 // AcceptedCiphers is the list of accepted TLS ciphers, with known weak ciphers elided
@@ -57,7 +57,7 @@ func loadTLSConfig(certPath, keyPath string) (*tls.Config, error) {
 	}, nil
 }

-func setupHTTPServer(ctx context.Context, p providers.Provider, cfg *apiServerConfig) (_ func(), retErr error) {
+func setupHTTPServer(ctx context.Context, p provider.Provider, cfg *apiServerConfig) (_ func(), retErr error) {
 	var closers []io.Closer
 	cancel := func() {
 		for _, c := range closers {
@@ -113,7 +113,7 @@ func setupHTTPServer(ctx context.Context, p providers.Provider, cfg *apiServerCo
 	mux := http.NewServeMux()

 	var summaryHandlerFunc api.PodStatsSummaryHandlerFunc
-	if mp, ok := p.(providers.PodMetricsProvider); ok {
+	if mp, ok := p.(provider.PodMetricsProvider); ok {
 		summaryHandlerFunc = mp.GetStatsSummary
 	}
 	podMetricsRoutes := api.PodMetricsConfig{
@@ -18,8 +18,8 @@ import (
 	"context"
 	"strings"

+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/provider"
 	"github.com/virtual-kubelet/virtual-kubelet/errdefs"
-	"github.com/virtual-kubelet/virtual-kubelet/providers"
 	corev1 "k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -29,7 +29,7 @@ const osLabel = "beta.kubernetes.io/os"

 // NodeFromProvider builds a kubernetes node object from a provider
 // This is a temporary solution until node stuff actually split off from the provider interface itself.
-func NodeFromProvider(ctx context.Context, name string, taint *v1.Taint, p providers.Provider, version string) *v1.Node {
+func NodeFromProvider(ctx context.Context, name string, taint *v1.Taint, p provider.Provider, version string) *v1.Node {
 	taints := make([]v1.Taint, 0)

 	if taint != nil {
@@ -22,11 +22,11 @@ import (

 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/provider"
 	"github.com/virtual-kubelet/virtual-kubelet/errdefs"
+	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
 	"github.com/virtual-kubelet/virtual-kubelet/log"
-	"github.com/virtual-kubelet/virtual-kubelet/manager"
 	"github.com/virtual-kubelet/virtual-kubelet/node"
-	"github.com/virtual-kubelet/virtual-kubelet/providers"
 	corev1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -43,7 +43,7 @@ import (

 // NewCommand creates a new top-level command.
 // This command is used to start the virtual-kubelet daemon
-func NewCommand(ctx context.Context, name string, s *providers.Store, c Opts) *cobra.Command {
+func NewCommand(ctx context.Context, name string, s *provider.Store, c Opts) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   name,
 		Short: name + " provides a virtual kubelet interface for your kubernetes cluster.",
@@ -59,11 +59,11 @@ This allows users to schedule kubernetes workloads on nodes that aren't running
 	return cmd
 }

-func runRootCommand(ctx context.Context, s *providers.Store, c Opts) error {
+func runRootCommand(ctx context.Context, s *provider.Store, c Opts) error {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()

-	if ok := providers.ValidOperatingSystems[c.OperatingSystem]; !ok {
+	if ok := provider.ValidOperatingSystems[c.OperatingSystem]; !ok {
 		return errdefs.InvalidInputf("operating system %q is not supported", c.OperatingSystem)
 	}
@@ -119,13 +119,13 @@ func runRootCommand(ctx context.Context, s *providers.Store, c Opts) error {
 		return err
 	}

-	initConfig := providers.InitConfig{
-		ConfigPath:      c.ProviderConfigPath,
-		NodeName:        c.NodeName,
-		OperatingSystem: c.OperatingSystem,
-		ResourceManager: rm,
-		DaemonPort:      int32(c.ListenPort),
-		InternalIP:      os.Getenv("VKUBELET_POD_IP"),
+	initConfig := provider.InitConfig{
+		ConfigPath:        c.ProviderConfigPath,
+		NodeName:          c.NodeName,
+		OperatingSystem:   c.OperatingSystem,
+		ResourceManager:   rm,
+		DaemonPort:        int32(c.ListenPort),
+		InternalIP:        os.Getenv("VKUBELET_POD_IP"),
+		KubeClusterDomain: c.KubeClusterDomain,
 	}
@@ -1,4 +1,4 @@
-package providers
+package provider

 import (
 	"context"
@@ -1,10 +1,10 @@
-package providers
+package provider

 import (
 	"sync"

 	"github.com/virtual-kubelet/virtual-kubelet/errdefs"
-	"github.com/virtual-kubelet/virtual-kubelet/manager"
+	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
 )

 // Store is used for registering/fetching providers
@@ -1,4 +1,4 @@
-package providers
+package provider

 const (
 	// OperatingSystemLinux is the configuration value for defining Linux.
@@ -25,12 +25,12 @@ import (
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
-	cmdproviders "github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/providers"
-	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/root"
-	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/commands/version"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/commands/providers"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/commands/root"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/commands/version"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/provider"
 	"github.com/virtual-kubelet/virtual-kubelet/log"
 	logruslogger "github.com/virtual-kubelet/virtual-kubelet/log/logrus"
-	"github.com/virtual-kubelet/virtual-kubelet/providers"
 	"github.com/virtual-kubelet/virtual-kubelet/trace"
 	"github.com/virtual-kubelet/virtual-kubelet/trace/opencensus"
 )
@@ -57,11 +57,11 @@ func main() {
 	optsErr := root.SetDefaultOpts(&opts)
 	opts.Version = strings.Join([]string{k8sVersion, "vk", buildVersion}, "-")

-	s := providers.NewStore()
+	s := provider.NewStore()
 	registerMock(s)

 	rootCmd := root.NewCommand(ctx, filepath.Base(os.Args[0]), s, opts)
-	rootCmd.AddCommand(version.NewCommand(buildVersion, buildTime), cmdproviders.NewCommand(s))
+	rootCmd.AddCommand(version.NewCommand(buildVersion, buildTime), providers.NewCommand(s))
 	preRun := rootCmd.PreRunE

 	var logLevel string
@@ -1,12 +1,12 @@
 package main

 import (
-	"github.com/virtual-kubelet/virtual-kubelet/providers"
-	"github.com/virtual-kubelet/virtual-kubelet/providers/mock"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/provider"
+	"github.com/virtual-kubelet/virtual-kubelet/cmd/virtual-kubelet/internal/provider/mock"
 )

-func registerMock(s *providers.Store) {
-	s.Register("mock", func(cfg providers.InitConfig) (providers.Provider, error) {
+func registerMock(s *provider.Store) {
+	s.Register("mock", func(cfg provider.InitConfig) (provider.Provider, error) {
 		return mock.NewMockProvider(
 			cfg.ConfigPath,
 			cfg.NodeName,
@@ -16,7 +16,7 @@ func registerMock(s *providers.Store) {
 		)
 	})

-	s.Register("mockV0", func(cfg providers.InitConfig) (providers.Provider, error) {
+	s.Register("mockV0", func(cfg provider.InitConfig) (provider.Provider, error) {
 		return mock.NewMockProvider(
 			cfg.ConfigPath,
 			cfg.NodeName,
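Under the relocated `provider` package, a hypothetical out-of-tree provider would be registered the same way as the mock provider above; `"my-provider"` and `myprovider.New` are illustrative names, not part of this change:

```go
s.Register("my-provider", func(cfg provider.InitConfig) (provider.Provider, error) {
	// InitConfig carries the node name, config path, daemon port, etc. from the CLI.
	return myprovider.New(cfg.ConfigPath, cfg.NodeName)
})
```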
doc.go (new file, 24 lines)
@@ -0,0 +1,24 @@
+/*
+Package virtualkubelet is currently just for providing docs for godoc.
+
+Virtual Kubelet is a project which aims to provide a library that can be
+consumed by other projects to build a Kubernetes node agent that performs the
+same basic role as the Kubelet, but with fully customizable behavior.
+
+*Note*: Virtual Kubelet is not the Kubelet.
+
+All of the business logic for virtual-kubelet is in the `node` package. The
+node package has controllers for managing the node in Kubernetes and running
+scheduled pods against a backend service. The backend service along with the
+code wrapping what is provided in the node package is what consumers of this
+project would implement. In the interest of not duplicating examples, please
+see that package on how to get started using virtual kubelet.
+
+Virtual Kubelet supports propagation of logging and traces through a context.
+See the "log" and "trace" packages for how to use this.
+
+Errors produced by and consumed from the node package are expected to conform to
+error types defined in the "errdefs" package in order to be able to understand
+the kind of failure that occurred and react accordingly.
+*/
+package virtualkubelet
errdefs/doc.go (new file, 4 lines)
@@ -0,0 +1,4 @@
+// Package errdefs defines the error types that are understood by other packages
+// in this project. Consumers of this project should look here to know how to
+// produce and consume errors for this project.
+package errdefs
@@ -1,4 +1,4 @@
-apiVersion: skaffold/v1alpha5
+apiVersion: skaffold/v1beta12
 kind: Config
 build:
   artifacts:
@@ -22,8 +22,8 @@ import (
 	corev1listers "k8s.io/client-go/listers/core/v1"
 	"k8s.io/client-go/tools/cache"

+	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
 	testutil "github.com/virtual-kubelet/virtual-kubelet/internal/test/util"
-	"github.com/virtual-kubelet/virtual-kubelet/manager"
 )

 // TestGetPods verifies that the resource manager acts as a passthrough to a pod lister.
@@ -9,7 +9,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/cache"

-	"github.com/virtual-kubelet/virtual-kubelet/manager"
+	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
 )

 // FakeResourceManager returns an instance of the resource manager that will return the specified objects when its "GetX" methods are called.
@@ -32,8 +32,8 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/envvars"
 	"k8s.io/kubernetes/third_party/forked/golang/expansion"

+	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
 	"github.com/virtual-kubelet/virtual-kubelet/log"
-	"github.com/virtual-kubelet/virtual-kubelet/manager"
 )

 const (
node/node.go (24 changes)
@@ -312,7 +312,7 @@ func (n *NodeController) handlePing(ctx context.Context) (retErr error) {
 }

 func (n *NodeController) updateLease(ctx context.Context) error {
-	l, err := UpdateNodeLease(ctx, n.leases, newLease(n.lease))
+	l, err := updateNodeLease(ctx, n.leases, newLease(n.lease))
 	if err != nil {
 		return err
 	}
@@ -324,7 +324,7 @@ func (n *NodeController) updateLease(ctx context.Context) error {
 func (n *NodeController) updateStatus(ctx context.Context, skipErrorCb bool) error {
 	updateNodeStatusHeartbeat(n.n)

-	node, err := UpdateNodeStatus(ctx, n.nodes, n.n)
+	node, err := updateNodeStatus(ctx, n.nodes, n.n)
 	if err != nil {
 		if skipErrorCb || n.nodeStatusUpdateErrorHandler == nil {
 			return err
@@ -333,7 +333,7 @@ func (n *NodeController) updateStatus(ctx context.Context, skipErrorCb bool) err
 		return err
 	}

-	node, err = UpdateNodeStatus(ctx, n.nodes, n.n)
+	node, err = updateNodeStatus(ctx, n.nodes, n.n)
 	if err != nil {
 		return err
 	}
@@ -362,14 +362,12 @@ func ensureLease(ctx context.Context, leases v1beta1.LeaseInterface, lease *coor
 	return l, err
 }

-// UpdateNodeLease updates the node lease.
+// updateNodeLease updates the node lease.
 //
 // If this function returns an errors.IsNotFound(err) error, this likely means
-// that node leases are not supported, if this is the case, call UpdateNodeStatus
+// that node leases are not supported, if this is the case, call updateNodeStatus
 // instead.
-//
-// If you use this function, it is up to you to syncronize this with other operations.
-func UpdateNodeLease(ctx context.Context, leases v1beta1.LeaseInterface, lease *coord.Lease) (*coord.Lease, error) {
+func updateNodeLease(ctx context.Context, leases v1beta1.LeaseInterface, lease *coord.Lease) (*coord.Lease, error) {
 	ctx, span := trace.StartSpan(ctx, "node.UpdateNodeLease")
 	defer span.End()

@@ -403,9 +401,9 @@ func UpdateNodeLease(ctx context.Context, leases v1beta1.LeaseInterface, lease *
 // just so we don't have to allocate this on every get request
 var emptyGetOptions = metav1.GetOptions{}

-// PatchNodeStatus patches node status.
+// patchNodeStatus patches node status.
 // Copied from github.com/kubernetes/kubernetes/pkg/util/node
-func PatchNodeStatus(nodes v1.NodeInterface, nodeName types.NodeName, oldNode *corev1.Node, newNode *corev1.Node) (*corev1.Node, []byte, error) {
+func patchNodeStatus(nodes v1.NodeInterface, nodeName types.NodeName, oldNode *corev1.Node, newNode *corev1.Node) (*corev1.Node, []byte, error) {
 	patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode)
 	if err != nil {
 		return nil, nil, err
@@ -441,13 +439,13 @@ func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *corev1.Nod
 	return patchBytes, nil
 }

-// UpdateNodeStatus triggers an update to the node status in Kubernetes.
+// updateNodeStatus triggers an update to the node status in Kubernetes.
 // It first fetches the current node details and then sets the status according
 // to the passed in node object.
 //
-// If you use this function, it is up to you to syncronize this with other operations.
-// This reduces the time to second-level precision.
-func UpdateNodeStatus(ctx context.Context, nodes v1.NodeInterface, n *corev1.Node) (_ *corev1.Node, retErr error) {
+// This reduces the time to second-level precision.
+func updateNodeStatus(ctx context.Context, nodes v1.NodeInterface, n *corev1.Node) (_ *corev1.Node, retErr error) {
 	ctx, span := trace.StartSpan(ctx, "UpdateNodeStatus")
 	defer func() {
 		span.End()
@@ -469,7 +467,7 @@ func UpdateNodeStatus(ctx context.Context, nodes v1.NodeInterface, n *corev1.Nod
 	ctx = addNodeAttributes(ctx, span, node)

 	// Patch the node status to merge other changes on the node.
-	updated, _, err := PatchNodeStatus(nodes, types.NodeName(n.Name), oldNode, node)
+	updated, _, err := patchNodeStatus(nodes, types.NodeName(n.Name), oldNode, node)
 	if err != nil {
 		return nil, err
 	}
@@ -242,20 +242,20 @@ func TestUpdateNodeStatus(t *testing.T) {
 	nodes := testclient.NewSimpleClientset().CoreV1().Nodes()

 	ctx := context.Background()
-	updated, err := UpdateNodeStatus(ctx, nodes, n.DeepCopy())
+	updated, err := updateNodeStatus(ctx, nodes, n.DeepCopy())
 	assert.Equal(t, errors.IsNotFound(err), true, err)

 	_, err = nodes.Create(n)
 	assert.NilError(t, err)

-	updated, err = UpdateNodeStatus(ctx, nodes, n.DeepCopy())
+	updated, err = updateNodeStatus(ctx, nodes, n.DeepCopy())
 	assert.NilError(t, err)

 	assert.NilError(t, err)
 	assert.Check(t, cmp.DeepEqual(n.Status, updated.Status))

 	n.Status.Phase = corev1.NodeRunning
-	updated, err = UpdateNodeStatus(ctx, nodes, n.DeepCopy())
+	updated, err = updateNodeStatus(ctx, nodes, n.DeepCopy())
 	assert.NilError(t, err)
 	assert.Check(t, cmp.DeepEqual(n.Status, updated.Status))

@@ -265,7 +265,7 @@ func TestUpdateNodeStatus(t *testing.T) {
 	_, err = nodes.Get(n.Name, metav1.GetOptions{})
 	assert.Equal(t, errors.IsNotFound(err), true, err)

-	_, err = UpdateNodeStatus(ctx, nodes, updated.DeepCopy())
+	_, err = updateNodeStatus(ctx, nodes, updated.DeepCopy())
 	assert.Equal(t, errors.IsNotFound(err), true, err)
 }

@@ -276,7 +276,7 @@ func TestUpdateNodeLease(t *testing.T) {
 	setLeaseAttrs(lease, n, 0)

 	ctx := context.Background()
-	l, err := UpdateNodeLease(ctx, leases, lease)
+	l, err := updateNodeLease(ctx, leases, lease)
 	assert.NilError(t, err)
 	assert.Equal(t, l.Name, lease.Name)
 	assert.Assert(t, cmp.DeepEqual(l.Spec.HolderIdentity, lease.Spec.HolderIdentity))
@@ -289,7 +289,7 @@ func TestUpdateNodeLease(t *testing.T) {

 	l.Spec.RenewTime.Time = time.Now().Add(10 * time.Second)

-	compare, err = UpdateNodeLease(ctx, leases, l.DeepCopy())
+	compare, err = updateNodeLease(ctx, leases, l.DeepCopy())
 	assert.NilError(t, err)
 	assert.Equal(t, compare.Spec.RenewTime.Time.Unix(), l.Spec.RenewTime.Time.Unix())
 	assert.Equal(t, compare.Name, lease.Name)
node/pod.go (17 changes)
@@ -131,17 +131,24 @@ func (pc *PodController) handleProviderError(ctx context.Context, span trace.Spa
 }

 func (pc *PodController) deletePod(ctx context.Context, namespace, name string) error {
-	// Grab the pod as known by the provider.
+	ctx, span := trace.StartSpan(ctx, "deletePod")
+	defer span.End()

-	pod, err := pc.provider.GetPod(ctx, namespace, name)
-	if err != nil {
-		if errdefs.IsNotFound(err) {
-			// The provider is not aware of the pod, but we must still delete the Kubernetes API resource.
-			return pc.forceDeletePodResource(ctx, namespace, name)
-		}
-		return err
-	}
+	// NOTE: Some providers return a non-nil error in their GetPod implementation when the pod is not found while some others don't.
+	// Hence, we ignore the error and just act upon the pod if it is non-nil (meaning that the provider still knows about the pod).
+	pod, _ := pc.provider.GetPod(ctx, namespace, name)
+	if pod == nil {
+		// The provider is not aware of the pod, but we must still delete the Kubernetes API resource.
+		return pc.forceDeletePodResource(ctx, namespace, name)
+	}

-	ctx, span := trace.StartSpan(ctx, "deletePod")
-	defer span.End()
 	ctx = addPodAttributes(ctx, span, pod)

 	var delErr error
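The NOTE in the new code is the key behavioral change: callers can no longer rely on a not-found error from `GetPod`, because providers disagree on whether a missing pod is an error. A hedged sketch of the resulting convention (the helper name is illustrative, not part of this change):

```go
// podKnownToProvider treats a nil pod as "unknown", tolerating providers that
// return (nil, nil) for missing pods as well as those that return a
// not-found error.
func podKnownToProvider(ctx context.Context, p PodLifecycleHandler, namespace, name string) (*corev1.Pod, bool) {
	pod, _ := p.GetPod(ctx, namespace, name)
	return pod, pod != nil
}
```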
@@ -24,8 +24,8 @@ import (

 	pkgerrors "github.com/pkg/errors"
 	"github.com/virtual-kubelet/virtual-kubelet/errdefs"
+	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
 	"github.com/virtual-kubelet/virtual-kubelet/log"
-	"github.com/virtual-kubelet/virtual-kubelet/manager"
 	"github.com/virtual-kubelet/virtual-kubelet/trace"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"