Add node provider interface (#526)
This starts the work of having a `NodeProvider`, which is responsible for providing node details. It splits the responsibilities of node management off to a new controller. The primary change here is to add the framework pieces for node management and to move the VK CLI to use this new controller. It also adds support for node leases where available. Leases can be enabled via the command line (disabled by default), but the controller may fall back to status-only updates if it finds that leases aren't supported on the cluster.
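For orientation, here is a rough sketch of the shape such a node provider interface could take. This is an illustration inferred from the description above and from the `vkubelet.NaiveNodeProvider{}` wiring in cmd/root.go below; the method set is an assumption, not a verbatim copy of the PR:

package vkubelet

import (
	"context"

	corev1 "k8s.io/api/core/v1"
)

// NodeProvider is a sketch of an interface responsible for node
// details and liveness, decoupled from the pod-handling Provider.
// The method names here are assumptions for illustration.
type NodeProvider interface {
	// Ping is invoked periodically as a node heartbeat/health check.
	Ping(ctx context.Context) error

	// NotifyNodeStatus registers a callback the provider calls when its
	// view of the node changes, so the controller can update the API server.
	NotifyNodeStatus(ctx context.Context, cb func(*corev1.Node))
}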
cmd/node.go | 56 (new file)

@@ -0,0 +1,56 @@
+package cmd
+
+import (
+	"context"
+	"strings"
+
+	"github.com/virtual-kubelet/virtual-kubelet/providers"
+	"github.com/virtual-kubelet/virtual-kubelet/version"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var (
+	// vkVersion is a concatenation of the Kubernetes version the VK is built against, the string "vk", and the VK release version.
+	// TODO @pires revisit after VK 1.0 is released as agreed in https://github.com/virtual-kubelet/virtual-kubelet/pull/446#issuecomment-448423176.
+	vkVersion = strings.Join([]string{"v1.13.1", "vk", version.Version}, "-")
+)
+
+// NodeFromProvider builds a kubernetes node object from a provider.
+// This is a temporary solution until node handling is actually split off from the provider interface itself.
+func NodeFromProvider(ctx context.Context, name string, taint *v1.Taint, p providers.Provider) *v1.Node {
+	taints := make([]v1.Taint, 0)
+
+	if taint != nil {
+		taints = append(taints, *taint)
+	}
+
+	node := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+			Labels: map[string]string{
+				"type":                   "virtual-kubelet",
+				"kubernetes.io/role":     "agent",
+				"beta.kubernetes.io/os":  strings.ToLower(p.OperatingSystem()),
+				"kubernetes.io/hostname": name,
+				"alpha.service-controller.kubernetes.io/exclude-balancer": "true",
+			},
+		},
+		Spec: v1.NodeSpec{
+			Taints: taints,
+		},
+		Status: v1.NodeStatus{
+			NodeInfo: v1.NodeSystemInfo{
+				OperatingSystem: p.OperatingSystem(),
+				Architecture:    "amd64",
+				KubeletVersion:  vkVersion,
+			},
+			Capacity:        p.Capacity(ctx),
+			Allocatable:     p.Capacity(ctx),
+			Conditions:      p.NodeConditions(ctx),
+			Addresses:       p.NodeAddresses(ctx),
+			DaemonEndpoints: *p.NodeDaemonEndpoints(ctx),
+		},
+	}
+	return node
+}
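`NodeFromProvider` only exercises the node-facing subset of `providers.Provider`. As a rough illustration, a fake provider would need just the methods below for the node object to be built; `fakeProvider` is invented for this sketch, and the full `providers.Provider` interface also carries pod CRUD methods not shown here:

package cmd

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// fakeProvider is hypothetical; it shows only the provider methods
// that NodeFromProvider consumes when building the node object.
type fakeProvider struct{}

func (fakeProvider) OperatingSystem() string { return "Linux" }

func (fakeProvider) Capacity(ctx context.Context) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("20"),
		v1.ResourceMemory: resource.MustParse("100Gi"),
		v1.ResourcePods:   resource.MustParse("100"),
	}
}

func (fakeProvider) NodeConditions(ctx context.Context) []v1.NodeCondition {
	return []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}
}

func (fakeProvider) NodeAddresses(ctx context.Context) []v1.NodeAddress {
	return []v1.NodeAddress{{Type: v1.NodeInternalIP, Address: "10.0.0.1"}}
}

func (fakeProvider) NodeDaemonEndpoints(ctx context.Context) *v1.NodeDaemonEndpoints {
	return &v1.NodeDaemonEndpoints{KubeletEndpoint: v1.DaemonEndpoint{Port: 10250}}
}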
cmd/root.go | 32

@@ -74,6 +74,7 @@ var apiConfig *apiServerConfig
 var podInformer corev1informers.PodInformer
 var kubeSharedInformerFactoryResync time.Duration
 var podSyncWorkers int
+var enableNodeLease bool
 
 var userTraceExporters []string
 var userTraceConfig = TracingExporterOptions{Tags: make(map[string]string)}
@@ -94,11 +95,22 @@ This allows users to schedule kubernetes workloads on nodes that aren't running
 
 		defer rootContextCancel()
 
+		pNode := NodeFromProvider(rootContext, nodeName, taint, p)
+		node, err := vkubelet.NewNode(
+			vkubelet.NaiveNodeProvider{},
+			pNode,
+			k8sClient.Coordination().Leases(corev1.NamespaceNodeLease),
+			k8sClient.CoreV1().Nodes(),
+			vkubelet.WithNodeDisableLease(!enableNodeLease),
+		)
+		if err != nil {
+			log.G(rootContext).Fatal(err)
+		}
+
 		vk := vkubelet.New(vkubelet.Config{
 			Client:          k8sClient,
 			Namespace:       kubeNamespace,
-			NodeName:        nodeName,
-			Taint:           taint,
+			NodeName:        pNode.Name,
 			Provider:        p,
 			ResourceManager: rm,
 			PodSyncWorkers:  podSyncWorkers,
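`vkubelet.WithNodeDisableLease` above reads as a functional option passed to `vkubelet.NewNode`. A minimal sketch of how that style of option is typically defined; the `NodeController` stub and its `disableLease` field are assumptions for illustration:

// NodeController here is a stub standing in for the real controller
// type; only the field the option touches is shown.
type NodeController struct {
	disableLease bool
}

// NodeControllerOpt mutates controller configuration at construction
// time; NewNode would apply each option in order.
type NodeControllerOpt func(*NodeController) error

// WithNodeDisableLease turns lease handling off even when the cluster
// supports the coordination (Lease) API.
func WithNodeDisableLease(disable bool) NodeControllerOpt {
	return func(n *NodeController) error {
		n.disableLease = disable
		return nil
	}
}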
@@ -118,9 +130,18 @@ This allows users to schedule kubernetes workloads on nodes that aren't running
 		}
 		defer cancelHTTP()
 
-		if err := vk.Run(rootContext); err != nil && errors.Cause(err) != context.Canceled {
-			log.G(rootContext).Fatal(err)
-		}
+		go func() {
+			if err := vk.Run(rootContext); err != nil && errors.Cause(err) != context.Canceled {
+				log.G(rootContext).Fatal(err)
+			}
+		}()
+
+		go func() {
+			if err := node.Run(rootContext); err != nil {
+				log.G(rootContext).Fatal(err)
+			}
+		}()
+
 		<-rootContext.Done()
 	},
 }
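The commit message notes that the controller may fall back when leases aren't supported. A sketch of what that check could look like, under assumed names (`tryCreateLease` is hypothetical; the client types match the `Coordination().Leases(...)` wiring above):

package vkubelet

import (
	"context"

	"github.com/virtual-kubelet/virtual-kubelet/log"
	coord "k8s.io/api/coordination/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	coordclient "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
)

// tryCreateLease is a hypothetical helper: it reports whether lease
// heartbeats are usable, returning false so the caller can fall back
// to plain node-status updates on clusters without the Lease API.
func tryCreateLease(ctx context.Context, leases coordclient.LeaseInterface, lease *coord.Lease) bool {
	_, err := leases.Create(lease)
	switch {
	case err == nil, errors.IsAlreadyExists(err):
		return true
	case errors.IsNotFound(err):
		log.G(ctx).Info("node leases not supported, falling back to node status updates")
		return false
	default:
		// Transient errors: keep leases enabled and retry later.
		log.G(ctx).WithError(err).Error("error creating node lease")
		return true
	}
}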
@@ -192,6 +213,7 @@ func init() {
 	RootCmd.PersistentFlags().MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable")
 	RootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", `set the log level, e.g. "trace", "debug", "info", "warn", "error"`)
 	RootCmd.PersistentFlags().IntVar(&podSyncWorkers, "pod-sync-workers", 10, `set the number of pod synchronization workers`)
+	RootCmd.PersistentFlags().BoolVar(&enableNodeLease, "enable-node-lease", false, `use node leases (1.13) for node heartbeats`)
 
 	RootCmd.PersistentFlags().StringSliceVar(&userTraceExporters, "trace-exporter", nil, fmt.Sprintf("sets the tracing exporter to use, available exporters: %s", AvailableTraceExporters()))
 	RootCmd.PersistentFlags().StringVar(&userTraceConfig.ServiceName, "trace-service-name", "virtual-kubelet", "sets the name of the service used to register with the trace exporter")
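With this registration, lease-based heartbeats remain opt-in: running the CLI with `--enable-node-lease` turns them on, while the default invocation keeps the existing status-update heartbeats.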