Compare commits
13 Commits
| SHA1 |
|---|
| 180cf34c3e |
| 55bd7ebaed |
| f10596562d |
| 4edd39a8ef |
| 77c3315da4 |
| a9ff4fed4a |
| ebc9463783 |
| 1d22783951 |
| 4b6f79111f |
| 5991b29f76 |
| 0dbc88826c |
| 966c76368f |
| a6bab6e3bb |
@@ -53,11 +53,12 @@ a `virtual-kubelet` node.
 ## Current Features
 
 - create, delete and update pods
-- container logs
+- container logs, exec, and metrics
 - get pod, pods and pod status
 - capacity
 - node addresses, node capacity, node daemon endpoints
 - operating system
+- bring your own virtual network
 
 
 ## Command-Line Usage
@@ -229,7 +230,7 @@ You can generate this file by following the instructions listed in the
 
 ### Missing Load Balancer IP addresses for services
 
-#### When Virtual Kubelet is installed on a cluster, you cannot create external-IPs for a Service
+#### Providers that do not support service discovery
 
 Kubernetes 1.9 introduces a new flag, `ServiceNodeExclusion`, for the control plane's Controller Manager. Enabling this flag in the Controller Manager's manifest allows Kubernetes to exclude Virtual Kubelet nodes from being added to Load Balancer pools, allowing you to create public facing services with external IPs without issue.
 
BIN  charts/virtual-kubelet-0.5.0.tgz (new file, binary file not shown)
BIN  charts/virtual-kubelet-for-aks-0.1.9.tgz (new file, binary file not shown)
@@ -1,5 +1,5 @@
 name: virtual-kubelet-for-aks
-version: 0.1.8
+version: 0.1.9
 description: a Helm chart to install virtual kubelet in an AKS or ACS cluster.
 sources:
 - https://github.com/virtual-kubelet/virtual-kubelet
@@ -41,6 +41,8 @@ spec:
   valueFrom:
     fieldRef:
       fieldPath: status.podIP
+- name: ACI_EXTRA_USER_AGENT
+  value: {{ printf "helm-chart/aks/%s/%s" .Chart.Name .Chart.Version }}
 - name: ACI_SUBNET_NAME
   value: {{ .Values.env.aciVnetSubnetName }}
 - name: ACI_SUBNET_CIDR
@@ -1,6 +1,6 @@
 name: virtual-kubelet
-version: 0.4.0
-appVersion: 0.4
+version: 0.5.0
+appVersion: 0.5
 description: A Helm chart to install virtual kubelet inside a Kubernetes cluster.
 icon: https://avatars2.githubusercontent.com/u/34250142
 sources:
@@ -60,6 +60,8 @@ spec:
   value: {{ .aciResourceGroup }}
 - name: ACI_REGION
   value: {{ .aciRegion }}
+- name: ACI_EXTRA_USER_AGENT
+  value: {{ printf "helm-chart/aks/%s/%s" $.Chart.Name $.Chart.Version }}
 {{- else }}
 - name: AZURE_AUTH_LOCATION
   value: /etc/virtual-kubelet/credentials.json
@@ -67,6 +69,8 @@ spec:
   value: {{ required "aciResourceGroup is required" .aciResourceGroup }}
 - name: ACI_REGION
   value: {{ required "aciRegion is required" .aciRegion }}
+- name: ACI_EXTRA_USER_AGENT
+  value: {{ printf "helm-chart/other/%s/%s" $.Chart.Name $.Chart.Version }}
 {{- end }}
 {{- if .vnet.enabled }}
 - name: ACI_SUBNET_NAME
@@ -41,7 +41,7 @@ import (
 )
 
 const (
     defaultDaemonPort = "10250"
 )
 
 var kubeletConfig string
@@ -70,7 +70,7 @@ var traceSampler string
 var RootCmd = &cobra.Command{
     Use:   "virtual-kubelet",
     Short: "virtual-kubelet provides a virtual kubelet interface for your kubernetes cluster.",
     Long: `virtual-kubelet implements the Kubelet interface with a pluggable
 backend implementation allowing users to create kubernetes nodes without running the kubelet.
 This allows users to schedule kubernetes workloads on nodes that aren't running Kubernetes.`,
     Run: func(cmd *cobra.Command, args []string) {
@@ -170,7 +170,7 @@ func init() {
     RootCmd.PersistentFlags().StringVar(&taintKey, "taint", "", "Set node taint key")
     RootCmd.PersistentFlags().MarkDeprecated("taint", "Taint key should now be configured using the VK_TAINT_KEY environment variable")
     RootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", `set the log level, e.g. "trace", debug", "info", "warn", "error"`)
-    RootCmd.PersistentFlags().IntVar(&podSyncWorkers, "pod-sync-workers", 10, `set the number of pod synchronization workers. default is 10.`)
+    RootCmd.PersistentFlags().IntVar(&podSyncWorkers, "pod-sync-workers", 1, `set the number of pod synchronization workers`)
 
     RootCmd.PersistentFlags().StringSliceVar(&userTraceExporters, "trace-exporter", nil, fmt.Sprintf("sets the tracing exporter to use, available exporters: %s", AvailableTraceExporters()))
     RootCmd.PersistentFlags().StringVar(&userTraceConfig.ServiceName, "trace-service-name", "virtual-kubelet", "sets the name of the service used to register with the trace exporter")
@@ -178,7 +178,8 @@ func (p *ECIProvider) CreatePod(ctx context.Context, pod *v1.Pod) error {
     request.RestartPolicy = string(pod.Spec.RestartPolicy)
 
     // get containers
-    containers, err := p.getContainers(pod)
+    containers, err := p.getContainers(pod, false)
+    initContainers, err := p.getContainers(pod, true)
     if err != nil {
         return err
     }
@@ -197,6 +198,7 @@ func (p *ECIProvider) CreatePod(ctx context.Context, pod *v1.Pod) error {
 
     // assign all the things
     request.Containers = containers
+    request.InitContainers = initContainers
     request.Volumes = volumes
     request.ImageRegistryCredentials = creds
     CreationTimestamp := pod.CreationTimestamp.UTC().Format(podTagTimeFormat)
@@ -539,9 +541,13 @@ func readDockerConfigJSONSecret(secret *v1.Secret, ips []eci.ImageRegistryCreden
     return ips, err
 }
 
-func (p *ECIProvider) getContainers(pod *v1.Pod) ([]eci.CreateContainer, error) {
-    containers := make([]eci.CreateContainer, 0, len(pod.Spec.Containers))
-    for _, container := range pod.Spec.Containers {
+func (p *ECIProvider) getContainers(pod *v1.Pod, init bool) ([]eci.CreateContainer, error) {
+    podContainers := pod.Spec.Containers
+    if init {
+        podContainers = pod.Spec.InitContainers
+    }
+    containers := make([]eci.CreateContainer, 0, len(podContainers))
+    for _, container := range podContainers {
         c := eci.CreateContainer{
             Name:  container.Name,
             Image: container.Image,
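The same helper now serves both regular and init containers: `CreatePod` calls it twice and assigns the results to `request.Containers` and `request.InitContainers`, as shown above. A minimal, self-contained sketch that isolates just the selection logic; the `pickContainers` name and the sample pod are illustrative and not part of this change:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// pickContainers mirrors the switch added to getContainers above: one code
// path builds the container list for either pod.Spec.Containers or
// pod.Spec.InitContainers, selected by the init flag.
func pickContainers(pod *v1.Pod, init bool) []v1.Container {
	podContainers := pod.Spec.Containers
	if init {
		podContainers = pod.Spec.InitContainers
	}
	return podContainers
}

func main() {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{Name: "setup", Image: "busybox"}},
			Containers:     []v1.Container{{Name: "app", Image: "nginx"}},
		},
	}
	fmt.Println(pickContainers(pod, false)[0].Name) // app
	fmt.Println(pickContainers(pod, true)[0].Name)  // setup
}
```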
@@ -646,9 +652,9 @@ func (p *ECIProvider) getVolumes(pod *v1.Pod) ([]eci.Volume, error) {
 
     if len(ConfigFileToPaths) != 0 {
         volumes = append(volumes, eci.Volume{
             Type: eci.VOL_TYPE_CONFIGFILEVOLUME,
             Name: v.Name,
-            ConfigFileVolumeConfigFileToPaths: ConfigFileToPaths,
+            ConfigFileToPaths: ConfigFileToPaths,
         })
     }
     continue
@@ -672,9 +678,9 @@ func (p *ECIProvider) getVolumes(pod *v1.Pod) ([]eci.Volume, error) {
 
     if len(ConfigFileToPaths) != 0 {
         volumes = append(volumes, eci.Volume{
             Type: eci.VOL_TYPE_CONFIGFILEVOLUME,
             Name: v.Name,
-            ConfigFileVolumeConfigFileToPaths: ConfigFileToPaths,
+            ConfigFileToPaths: ConfigFileToPaths,
         })
     }
     continue
@@ -77,6 +77,7 @@ func (client *Client) CreateContainerGroupWithCallback(request *CreateContainerG
 type CreateContainerGroupRequest struct {
     *requests.RpcRequest
     Containers []CreateContainer `position:"Query" name:"Container" type:"Repeated"`
+    InitContainers []CreateContainer `position:"Query" name:"InitContainer" type:"Repeated"`
     ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
     SecurityGroupId string `position:"Query" name:"SecurityGroupId"`
     ImageRegistryCredentials []ImageRegistryCredential `position:"Query" name:"ImageRegistryCredential" type:"Repeated"`
@@ -92,17 +93,17 @@ type CreateContainerGroupRequest struct {
 }
 
 type CreateContainer struct {
-    Name string `position:"Query" name:"Name"`
-    Image string `position:"Query" name:"Image"`
-    Memory requests.Float `position:"Query" name:"Memory"`
-    Cpu requests.Float `position:"Query" name:"Cpu"`
-    WorkingDir string `position:"Query" name:"WorkingDir"`
-    ImagePullPolicy string `position:"Query" name:"ImagePullPolicy"`
-    Commands []string `position:"Query" name:"Command" type:"Repeated"`
-    Args []string `position:"Query" name:"Arg" type:"Repeated"`
-    VolumeMounts []VolumeMount `position:"Query" name:"VolumeMount" type:"Repeated"`
-    Ports []ContainerPort `position:"Query" name:"Port" type:"Repeated"`
-    EnvironmentVars []EnvironmentVar `position:"Query" name:"EnvironmentVar" type:"Repeated"`
+    Name string `name:"Name"`
+    Image string `name:"Image"`
+    Memory requests.Float `name:"Memory"`
+    Cpu requests.Float `name:"Cpu"`
+    WorkingDir string `name:"WorkingDir"`
+    ImagePullPolicy string `name:"ImagePullPolicy"`
+    Commands []string `name:"Command" type:"Repeated"`
+    Args []string `name:"Arg" type:"Repeated"`
+    VolumeMounts []VolumeMount `name:"VolumeMount" type:"Repeated"`
+    Ports []ContainerPort `name:"Port" type:"Repeated"`
+    EnvironmentVars []EnvironmentVar `name:"EnvironmentVar" type:"Repeated"`
 }
 
 // CreateContainerGroupImageRegistryCredential is a repeated param struct in CreateContainerGroupRequest
@@ -115,8 +116,8 @@ type ImageRegistryCredential struct {
 // CreateContainerGroupResponse is the response struct for api CreateContainerGroup
 type CreateContainerGroupResponse struct {
     *responses.BaseResponse
-    RequestId string `json:"RequestId" xml:"RequestId"`
-    ContainerGroupId string `json:"ContainerGroupId" xml:"ContainerGroupId"`
+    RequestId string
+    ContainerGroupId string
 }
 
 // CreateCreateContainerGroupRequest creates a request to invoke CreateContainerGroup API
@@ -17,6 +17,6 @@ package eci
 
 // ConfigFileVolumeConfigFileToPath is a nested struct in eci response
 type ConfigFileToPath struct {
-    Content string `json:"Content" xml:"Content" position:"Query" name:"Content"`
-    Path string `json:"Path" xml:"Path" position:"Query" name:"Path"`
+    Content string `name:"Content"`
+    Path string `name:"Path"`
 }
@@ -17,18 +17,18 @@ package eci
 
 // Container is a nested struct in eci response
 type Container struct {
-    Name string `json:"Name" xml:"Name" position:"Query" name:"Name"`
-    Image string `json:"Image" xml:"Image" position:"Query" name:"Image"`
-    Memory float64 `json:"Memory" xml:"Memory" position:"Query" name:"Memory"`
-    Cpu float64 `json:"Cpu" xml:"Cpu" position:"Query" name:"Cpu"`
+    Name string `json:"Name" xml:"Name"`
+    Image string `json:"Image" xml:"Image"`
+    Memory float64 `json:"Memory" xml:"Memory"`
+    Cpu float64 `json:"Cpu" xml:"Cpu"`
     RestartCount int `json:"RestartCount" xml:"RestartCount"`
-    WorkingDir string `json:"WorkingDir" xml:"WorkingDir" position:"Query" name:"WorkingDir"`
-    ImagePullPolicy string `json:"ImagePullPolicy" xml:"ImagePullPolicy" position:"Query" name:"ImagePullPolicy"`
-    Commands []string `json:"Commands" xml:"Commands" position:"Query" name:"Command" type:"Repeated"`
-    Args []string `json:"Args" xml:"Args" position:"Query" name:"Arg" type:"Repeated"`
+    WorkingDir string `json:"WorkingDir" xml:"WorkingDir"`
+    ImagePullPolicy string `json:"ImagePullPolicy" xml:"ImagePullPolicy"`
+    Commands []string `json:"Commands" xml:"Commands"`
+    Args []string `json:"Args" xml:"Args"`
     PreviousState ContainerState `json:"PreviousState" xml:"PreviousState"`
     CurrentState ContainerState `json:"CurrentState" xml:"CurrentState"`
-    VolumeMounts []VolumeMount `json:"VolumeMounts" xml:"VolumeMounts" position:"Query" name:"VolumeMount" type:"Repeated"`
-    Ports []ContainerPort `json:"Ports" xml:"Ports" position:"Query" name:"Port" type:"Repeated"`
-    EnvironmentVars []EnvironmentVar `json:"EnvironmentVars" xml:"EnvironmentVars" position:"Query" name:"EnvironmentVar" type:"Repeated"`
+    VolumeMounts []VolumeMount `json:"VolumeMounts" xml:"VolumeMounts"`
+    Ports []ContainerPort `json:"Ports" xml:"Ports"`
+    EnvironmentVars []EnvironmentVar `json:"EnvironmentVars" xml:"EnvironmentVars"`
 }
@@ -21,6 +21,6 @@ import (
 
 // ContainerPort is a nested struct in eci response
 type ContainerPort struct {
-    Port requests.Integer `json:"Port" xml:"Port" position:"Query" name:"Port"`
-    Protocol string `json:"Protocol" xml:"Protocol" position:"Query" name:"Protocol"`
+    Port requests.Integer `name:"Port"`
+    Protocol string `name:"Protocol"`
 }
@@ -17,6 +17,6 @@ package eci
 
 // EnvironmentVar is a nested struct in eci response
 type EnvironmentVar struct {
-    Key string `json:"Key" xml:"Key" position:"Query" name:"Key"`
-    Value string `json:"Value" xml:"Value" position:"Query" name:"Value"`
+    Key string `name:"Key"`
+    Value string `name:"Value"`
 }
@@ -17,6 +17,6 @@ package eci
 
 // Label is a nested struct in eci response
 type Tag struct {
-    Key string `json:"Key" xml:"Key" position:"Query" name:"Key"`
-    Value string `json:"Value" xml:"Value" position:"Query" name:"Value"`
+    Key string `name:"Key"`
+    Value string `name:"Value"`
 }
@@ -25,11 +25,11 @@ const (
 )
 
 type Volume struct {
-    Type string `json:"Type" xml:"Type" position:"Query" name:"Type"`
-    Name string `json:"Name" xml:"Name" position:"Query" name:"Name"`
-    NfsVolumePath string `json:"NfsVolumePath" xml:"NfsVolumePath" position:"Query" name:"NFSVolume.Path"`
-    NfsVolumeServer string `json:"NfsVolumeServer" xml:"NfsVolumeServer" position:"Query" name:"NFSVolume.Server"`
-    NfsVolumeReadOnly requests.Boolean `json:"NfsVolumeReadOnly" xml:"NfsVolumeReadOnly" position:"Query" name:"NFSVolume.ReadOnly"`
-    EmptyDirVolumeEnable requests.Boolean `json:"EmptyDirVolumeEnable" xml:"EmptyDirVolumeEnable" position:"Query" name:"EmptyDirVolume.Enable"`
-    ConfigFileVolumeConfigFileToPaths []ConfigFileToPath `json:"ConfigFileVolumeConfigFileToPaths" xml:"ConfigFileVolume" position:"Query" name:"ConfigFileVolume.ConfigFileToPath" type:"Repeated"`
+    Type string `name:"Type"`
+    Name string `name:"Name"`
+    NfsVolumePath string `name:"NFSVolume.Path"`
+    NfsVolumeServer string `name:"NFSVolume.Server"`
+    NfsVolumeReadOnly requests.Boolean `name:"NFSVolume.ReadOnly"`
+    EmptyDirVolumeEnable requests.Boolean `name:"EmptyDirVolume.Enable"`
+    ConfigFileToPaths []ConfigFileToPath `name:"ConfigFileVolume.ConfigFileToPath" type:"Repeated"`
 }
@@ -19,7 +19,7 @@ import "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
 
 // VolumeMount is a nested struct in eci response
 type VolumeMount struct {
-    MountPath string `json:"MountPath" xml:"MountPath" position:"Query" name:"MountPath"`
-    ReadOnly requests.Boolean `json:"ReadOnly" xml:"ReadOnly" position:"Query" name:"ReadOnly"`
-    Name string `json:"Name" xml:"Name" position:"Query" name:"Name"`
+    MountPath string `name:"MountPath"`
+    ReadOnly requests.Boolean `name:"ReadOnly"`
+    Name string `name:"Name"`
 }
@@ -5,16 +5,19 @@ Azure Container Instances (ACI) provide a hosted environment for running contain
 The Azure Container Instances provider for the Virtual Kubelet configures an ACI instance as a node in any Kubernetes cluster. When using the Virtual Kubelet ACI provider, pods can be scheduled on an ACI instance as if the ACI instance is a standard Kubernetes node. This configuration allows you to take advantage of both the capabilities of Kubernetes and the management value and cost benefit of ACI.
 
 This document details configuring the Virtual Kubelet ACI provider.
 
 #### Table of Contents
 
 * [Prerequiste](#prerequisite)
 * [Quick set-up with the ACI Connector](#quick-set-up-with-the-aci-connector)
 * [Manual set-up](#manual-set-up)
+* [Create a cluster with a Virtual Network](#create-an-aks-cluster-with-vnet)
 * [Validate the Virtual Kubelet ACI provider](#validate-the-virtual-kubelet-aci-provider)
 * [Schedule a pod in ACI](#schedule-a-pod-in-aci)
 * [Work arounds](#work-arounds-for-the-aci-connector)
 * [Upgrade the ACI Connector ](#upgrade-the-aci-connector)
 * [Remove the Virtual Kubelet](#remove-the-virtual-kubelet)
 
 ## Prerequisite
 
 This guide assumes that you have a Kubernetes cluster up and running (can be `minikube`) and that `kubectl` is already configured to talk to it.
@@ -120,7 +123,7 @@ First let's identify your Azure subscription and save it for use later on in the
 
 ## Quick set-up with the ACI Connector
 
-The Azure cli can be used to install the ACI provider. We like to say Azure's provider or implementation for Virtual Kubelet is the ACI Connector.
+The Azure cli can be used to install the ACI provider. We like to say Azure's provider or implementation for Virtual Kubelet is the ACI Connector. Please note that this command has no Virtual Networking support.
 For this section Virtual Kubelet's specific ACI provider will be referenced as the the ACI Connector.
 If you continue with this section you can skip sections below up to "Schedule a pod in ACI", as we use Azure Container Service (AKS) to easily deploy and install the connector, thus it is assumed
 that you've created an [AKS cluster](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough).
@@ -164,10 +167,10 @@ az group create --name aci-group --location "$ACI_REGION"
 export AZURE_RG=aci-group
 ```
 
 ### Create a service principal
 
 This creates an identity for the Virtual Kubelet ACI provider to use when provisioning
-resources on your account on behalf of Kubernetes.
+resources on your account on behalf of Kubernetes. This step is optional if you are provisoning Virtual Kubelet on AKS.
 
 1. Create a service principal with RBAC enabled for the quickstart:
 ```cli
@@ -194,7 +197,7 @@ resources on your account on behalf of Kubernetes.
 Run these commands to deploy the virtual kubelet which connects your Kubernetes cluster to Azure Container Instances.
 
 ```cli
-export VK_RELEASE=virtual-kubelet-0.2.0
+export VK_RElEASE=virtual-kubelet-latest
 ```
 
 If your cluster is an AKS cluster:
@@ -203,18 +206,9 @@ RELEASE_NAME=virtual-kubelet
 NODE_NAME=virtual-kubelet
 CHART_URL=https://github.com/virtual-kubelet/virtual-kubelet/raw/master/charts/$VK_RELEASE.tgz
 
-curl https://raw.githubusercontent.com/virtual-kubelet/virtual-kubelet/master/scripts/createCertAndKey.sh > createCertAndKey.sh
-chmod +x createCertAndKey.sh
-. ./createCertAndKey.sh
-
 helm install "$CHART_URL" --name "$RELEASE_NAME" \
 --set provider=azure \
 --set providers.azure.targetAKS=true \
---set providers.azure.tenantId=$AZURE_TENANT_ID \
---set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \
---set providers.azure.clientId=$AZURE_CLIENT_ID \
---set apiserverCert=$cert \
---set apiserverKey=$key
 ```
 
 For any other type of Kubernetes cluster:
@@ -223,22 +217,16 @@ RELEASE_NAME=virtual-kubelet
 NODE_NAME=virtual-kubelet
 CHART_URL=https://github.com/virtual-kubelet/virtual-kubelet/raw/master/charts/$VK_RELEASE.tgz
 
-curl https://raw.githubusercontent.com/virtual-kubelet/virtual-kubelet/master/scripts/createCertAndKey.sh > createCertAndKey.sh
-chmod +x createCertAndKey.sh
-. ./createCertAndKey.sh
-
 helm install "$CHART_URL" --name "$RELEASE_NAME" \
 --set provider=azure \
 --set rbac.install=true \
 --set providers.azure.targetAKS=false \
+--set providers.azure.aciResourceGroup=$AZURE_RG \
+--set providers.azure.aciRegion=$ACI_REGION \
 --set providers.azure.tenantId=$AZURE_TENANT_ID \
 --set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \
 --set providers.azure.clientId=$AZURE_CLIENT_ID \
---set providers.azure.clientKey=$AZURE_CLIENT_SECRET \
---set providers.azure.aciResourceGroup=$AZURE_RG \
---set providers.azure.aciRegion=$ACI_REGION \
---set apiserverCert=$cert \
---set apiserverKey=$key
+--set providers.azure.clientKey=$AZURE_CLIENT_SECRET
 ```
 
 If your cluster has RBAC enabled set ```rbac.install=true```
@@ -273,6 +261,168 @@ To verify that virtual kubelet has started, run:
 ```cli
 kubectl --namespace=default get pods -l "app=virtual-kubelet-virtual-kubelet"
 ```
+
+## Create an AKS cluster with VNet
+
+Run the following commands to create an AKS cluster with a new Azure virtual network. Also, create two subnets. One will be delegated to the cluster and the other will be delegated to Azure Container Instances.
+
+### Create an Azure virtual network and subnets
+
+First, set the following variables for your VNet range and two subnet ranges within that VNet. The following ranges are recommended for those just trying out the connector with VNet.
+
+**Bash**
+```cli
+export VNET_RANGE=10.0.0.0/8
+export CLUSTER_SUBNET_RANGE=10.240.0.0/16
+export ACI_SUBNET_RANGE=10.241.0.0/16
+export VNET_NAME=myAKSVNet
+export CLUSTER_SUBNET_NAME=myAKSSubnet
+export ACI_SUBNET_NAME=myACISubnet
+export AKS_CLUSTER_RG=myresourcegroup
+export KUBE_DNS_IP=10.0.0.10
+```
+Run the following command to create a virtual network within Azure, and a subnet within that VNet. The subnet will be dedicated to the nodes in the AKS cluster.
+
+```cli
+az network vnet create \
+--resource-group $AKS_CLUSTER_RG \
+--name $VNET_NAME \
+--address-prefixes $VNET_RANGE \
+--subnet-name $CLUSTER_SUBNET_NAME \
+--subnet-prefix $CLUSTER_SUBNET_RANGE
+```
+
+Create a subnet that will be delegated to just resources within ACI, note that this needs to be an empty subnet, but within the same VNet that you already created.
+
+```cli
+az network vnet subnet create \
+--resource-group $AKS_CLUSTER_RG \
+--vnet-name $VNET_NAME \
+--name $ACI_SUBNET_NAME \
+--address-prefix $ACI_SUBNET_RANGE
+```
+
+### Create a service principal (OPTIONAL)
+
+Create an Azure Active Directory service principal to allow AKS to interact with other Azure resources. You can use a pre-created service principal too.
+
+```cli
+az ad sp create-for-rbac -n "virtual-kubelet-sp" --skip-assignment
+```
+
+The output should look similar to the following.
+
+```console
+{
+  "appId": "bef76eb3-d743-4a97-9534-03e9388811fc",
+  "displayName": "azure-cli-2018-08-29-22-29-29",
+  "name": "http://azure-cli-2018-08-29-22-29-29",
+  "password": "1d257915-8714-4ce7-xxxxxxxxxxxxx",
+  "tenant": "72f988bf-86f1-41af-91ab-2d7cd011db48"
+}
+```
+Save the output values from the command output in enviroment variables.
+
+```cli
+export AZURE_TENANT_ID=<Tenant>
+export AZURE_CLIENT_ID=<AppId>
+export AZURE_CLIENT_SECRET=<Password>
+```
+
+These values can be integrated into the `az aks create` as a field ` --service-principal $AZURE_CLIENT_ID \`.
+
+### Integrating Azure VNet Resource
+
+If you want to integrate an already created Azure VNet resource with your AKS cluster than follow these steps.
+Grab the virtual network resource id with the following command:
+
+```cli
+az network vnet show --resource-group $AKS_CLUSTER_RG --name $VNET_NAME --query id -o tsv
+```
+
+Grant access to the AKS cluster to use the virtual network by creating a role and assigning it.
+
+```cli
+az role assignment create --assignee $AZURE_CLIENT_ID --scope <vnetId> --role NetworkContributor
+```
+
+### Create an AKS cluster with a virtual network
+
+Grab the id of the cluster subnet you created earlier with the following command.
+
+```cli
+az network vnet subnet show --resource-group $AKS_CLUSTER_RG --vnet-name $VNET_NAME --name $CLUSTER_SUBNET_NAME --query id -o tsv
+```
+
+Save the entire output starting witn "/subscriptions/..." in the following enviorment variable.
+
+```cli
+export VNET_SUBNET_ID=<subnet-resource>
+```
+
+Use the following command to create an AKS cluster with the virtual network you've already created.
+
+```cli
+az aks create \
+--resource-group myResourceGroup \
+--name myAKSCluster \
+--node-count 1 \
+--network-plugin azure \
+--service-cidr 10.0.0.0/16 \
+--dns-service-ip $KUBE_DNS_IP \
+--docker-bridge-address 172.17.0.1/16 \
+--vnet-subnet-id $VNET_SUBNET_ID \
+--client-secret $AZURE_CLIENT_SECRET
+```
+
+### Deploy Virtual Kubelet
+
+Manually deploy the Virtual Kubelet, the following env. variables have already been set earlier. You do need to pass through the subnet you created for ACI earlier, otherwise the container instances will not be able to participate with the other pods within the cluster subnet.
+
+Grab the public master URI for your Kubernetes cluster and save the value.
+
+```cli
+kubectl cluster-info
+export MASTER_URI=<public uri>
+```
+
+Set the following values for the helm chart.
+
+```cli
+RELEASE_NAME=virtual-kubelet
+NODE_NAME=virtual-kubelet
+CHART_URL=https://github.com/virtual-kubelet/virtual-kubelet/raw/master/charts/$VK_RELEASE.tgz
+```
+
+If your cluster is an AKS cluster:
+
+```cli
+helm install "$CHART_URL" --name "$RELEASE_NAME" \
+--set provider=azure \
+--set providers.azure.targetAKS=true \
+--set providers.azure.vnet.enabled=true \
+--set providers.azure.vnet.subnetName=$ACI_SUBNET_NAME \
+--set providers.azure.vent.subnetCidr=$ACI_SUBNET_RANGE \
+--set providers.azure.vnet.clusterCidr=$CLUSTER_SUBNET_RANGE \
+--set providers.azure.vnet.kubeDnsIp=$KUBE_DNS_IP \
+--set providers.azure.masterUri=$MASTER_URI
+```
+
+For any other type of cluster:
+
+```cli
+helm install "$CHART_URL" --name "$RELEASE_NAME" \
+--set provider=azure \
+--set providers.azure.targetAKS=false \
+--set providers.azure.vnet.enabled=true \
+--set providers.azure.vnet.subnetName=$ACI_SUBNET_NAME \
+--set providers.azure.vent.subnetCidr=$ACI_SUBNET_RANGE \
+--set providers.azure.vnet.kubeDnsIp=$KUBE_DNS_IP \
+--set providers.azure.tenantId=$AZURE_TENANT_ID \
+--set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \
+--set providers.azure.aciResourceGroup=$AZURE_RG \
+--set providers.azure.aciRegion=$ACI_REGION \
+--set providers.azure.masterUri=$MASTER_URI
+```
+
 ## Validate the Virtual Kubelet ACI provider
 
@@ -294,7 +444,7 @@ aks-nodepool1-39289454-2 Ready agent 22h v1.7.7
 
 ## Schedule a pod in ACI
 
-Create a file named `virtual-kubelet-test.yaml` and copy in the following YAML. Replace the `nodeName` value with the name given to the virtual kubelet node.
+Create a file named `virtual-kubelet-test.yaml` and copy in the following YAML.
 
 ```yaml
 apiVersion: v1
@@ -328,9 +478,13 @@ spec:
     effect: NoSchedule
 ```
 
-Notice that Virtual-Kubelet nodes are tainted by default to avoid unexpected pods running on them, i.e. kube-proxy, other virtual-kubelet pods, etc. To schedule a pod to them, you need to add the tolerations to your pod spec:
+Notice that Virtual-Kubelet nodes are tainted by default to avoid unexpected pods running on them, i.e. kube-proxy, other virtual-kubelet pods, etc. To schedule a pod to them, you need to add the toleration to the pod spec and a node selector:
 
 ```
+  nodeSelector:
+    kubernetes.io/role: agent
+    beta.kubernetes.io/os: linux
+    type: virtual-kubelet
   tolerations:
   - key: virtual-kubelet.io/provider
     operator: Exists
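For readers building pods programmatically, the same scheduling constraints can be expressed with the Kubernetes Go types. This is a sketch only; the field values mirror the YAML snippet above (the `NoSchedule` effect comes from the test manifest earlier in the diff), and nothing here is part of the change itself:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Only the scheduling-related fields of the pod spec are shown.
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			NodeSelector: map[string]string{
				"kubernetes.io/role":    "agent",
				"beta.kubernetes.io/os": "linux",
				"type":                  "virtual-kubelet",
			},
			Tolerations: []corev1.Toleration{{
				Key:      "virtual-kubelet.io/provider",
				Operator: corev1.TolerationOpExists,
				Effect:   corev1.TaintEffectNoSchedule,
			}},
		},
	}
	fmt.Printf("%+v\n", pod.Spec.Tolerations)
}
```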
@@ -365,6 +519,12 @@ Notice that the `helloworld` pod is running on the `virtual-kubelet` node.
 NAME                              READY     STATUS    RESTARTS   AGE   IP             NODE
 aci-helloworld-2559879000-8vmjw   1/1       Running   0          39s   52.179.3.180   virtual-kubelet
 
+```
+If the AKS cluster was configured with a virtual network, then the output will look like the following. The container instance will get a private ip rather than a public one.
+
+```console
+NAME                              READY     STATUS    RESTARTS   AGE   IP             NODE
+aci-helloworld-9b55975f-bnmfl     1/1       Running   0          4m    10.241.0.4     virtual-kubelet
 ```
 
 To validate that the container is running in an Azure Container Instance, use the [az container list][az-container-list] Azure CLI command.
@@ -448,7 +608,7 @@ ERROR: logging before flag.Parse: E0914 00:02:01.546132 1 streamwatcher.go
 time="2018-09-14T00:02:01Z" level=error msg="Pod watcher connection is closed unexpectedly" namespace= node=virtual-kubelet-myconnector-linux operatingSystem=Linux provider=azure
 ```
 
-Then copy the master URL with cluster-info.
+Then copy the master URI with cluster-info.
 
 ```cli
 kubectl cluster-info
@@ -480,7 +640,7 @@ Edit the deployment.
 kubectl edit deploy virtual-kubelet-virtual-kubelet
 ```
 
-Add the following name and value to the deployment in the enviorment section. Use your copied master URL.
+Add the following name and value to the deployment in the enviorment section. Use your copied AKS master URI.
 
 ```yaml
 --name: MASTER_URI
@@ -535,7 +695,7 @@ If you've installed Virtual Kubelet with the Azure cli so you're using the ACI C
 Run the following command to upgrade your ACI Connector.
 
 ```cli
-az aks upgrade-connector --resource-group <aks cluster rg> --name <aks cluster name> --connector-name myconnector --os-type linux
+az aks upgrade-connector --resource-group <aks cluster rg> --name <aks cluster name> --connector-name virtual-kubelet --os-type linux
 ```
 
 ## Remove the Virtual Kubelet
@@ -548,7 +708,7 @@ helm delete virtual-kubelet --purge
 If you used the ACI Connector installation then use the following command to remove the the ACI Connector from your cluster.
 
 ```cli
-az aks remove-connector --resource-group <aks cluster rg> --name <aks cluster name> --connector-name myconnector --os-type linux
+az aks remove-connector --resource-group <aks cluster rg> --name <aks cluster name> --connector-name virtual-kubelet --os-type linux
 ```
 
 <!-- LINKS -->
@@ -75,6 +75,7 @@ type ACIProvider struct {
     networkProfile string
     kubeProxyExtension *aci.Extension
     kubeDNSIP string
+    extraUserAgent string
 
     metricsSync sync.Mutex
     metricsSyncTime time.Time
@@ -196,7 +197,9 @@ func NewACIProvider(config string, rm *manager.ResourceManager, nodeName, operat
         azAuth.SubscriptionID = subscriptionID
     }
 
-    p.aciClient, err = aci.NewClient(azAuth)
+    p.extraUserAgent = os.Getenv("ACI_EXTRA_USER_AGENT")
+
+    p.aciClient, err = aci.NewClient(azAuth, p.extraUserAgent)
     if err != nil {
         return nil, err
     }
@@ -315,7 +318,7 @@ func NewACIProvider(config string, rm *manager.ResourceManager, nodeName, operat
 }
 
 func (p *ACIProvider) setupNetworkProfile(auth *client.Authentication) error {
-    c, err := network.NewClient(auth)
+    c, err := network.NewClient(auth, p.extraUserAgent)
     if err != nil {
         return fmt.Errorf("error creating azure networking client: %v", err)
     }
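To make the new plumbing concrete: the Helm templates earlier in this change render `ACI_EXTRA_USER_AGENT` into the provider's environment, and `NewACIProvider` now reads it and forwards it to the ACI and networking clients. A small, hedged sketch of that flow using only the standard library; the chart name and version literals are taken from the chart bump in this change, everything else is illustrative:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// What the aks chart template renders for ACI_EXTRA_USER_AGENT, i.e.
	// printf "helm-chart/aks/%s/%s" .Chart.Name .Chart.Version.
	rendered := fmt.Sprintf("helm-chart/aks/%s/%s", "virtual-kubelet-for-aks", "0.1.9")
	os.Setenv("ACI_EXTRA_USER_AGENT", rendered)

	// What NewACIProvider now does with it; the value is later passed to
	// aci.NewClient and network.NewClient as the extra user agent.
	extraUserAgent := os.Getenv("ACI_EXTRA_USER_AGENT")
	fmt.Println(extraUserAgent) // helm-chart/aks/virtual-kubelet-for-aks/0.1.9
}
```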
@@ -13,9 +13,9 @@ import (
 
 const (
     // BaseURI is the default URI used for compute services.
     baseURI = "https://management.azure.com"
-    userAgent = "virtual-kubelet/azure-arm-aci/2018-09-01"
+    defaultUserAgent = "virtual-kubelet/azure-arm-aci/2018-09-01"
     apiVersion = "2018-09-01"
 
     containerGroupURLPath = "subscriptions/{{.subscriptionId}}/resourceGroups/{{.resourceGroup}}/providers/Microsoft.ContainerInstance/containerGroups/{{.containerGroupName}}"
     containerGroupListURLPath = "subscriptions/{{.subscriptionId}}/providers/Microsoft.ContainerInstance/containerGroups"
@@ -34,12 +34,17 @@ type Client struct {
     auth *azure.Authentication
 }
 
-// NewClient creates a new Azure Container Instances client.
-func NewClient(auth *azure.Authentication) (*Client, error) {
+// NewClient creates a new Azure Container Instances client with extra user agent.
+func NewClient(auth *azure.Authentication, extraUserAgent string) (*Client, error) {
     if auth == nil {
         return nil, fmt.Errorf("Authentication is not supplied for the Azure client")
     }
 
+    userAgent := []string{defaultUserAgent}
+    if extraUserAgent != "" {
+        userAgent = append(userAgent, extraUserAgent)
+    }
+
     client, err := azure.NewClient(auth, baseURI, userAgent)
     if err != nil {
         return nil, fmt.Errorf("Creating Azure client failed: %v", err)
@@ -39,7 +39,7 @@ func TestMain(m *testing.M) {
     subscriptionID = auth.SubscriptionID
 
     // Check if the resource group exists and create it if not.
-    rgCli, err := resourcegroups.NewClient(auth)
+    rgCli, err := resourcegroups.NewClient(auth, "unit-test")
     if err != nil {
         log.Fatalf("creating new resourcegroups client failed: %v", err)
     }
@@ -82,7 +82,7 @@ func TestNewClient(t *testing.T) {
         log.Fatalf("Failed to load Azure authentication file: %v", err)
     }
 
-    c, err := NewClient(auth)
+    c, err := NewClient(auth, "unit-test")
     if err != nil {
         t.Fatal(err)
     }
@@ -24,13 +24,13 @@ type BearerAuthorizer struct {
 }
 
 type userAgentTransport struct {
-    userAgent string
+    userAgent []string
     base http.RoundTripper
     client *Client
 }
 
 // NewClient creates a new Azure API client from an Authentication struct and BaseURI.
-func NewClient(auth *Authentication, baseURI string, userAgent string) (*Client, error) {
+func NewClient(auth *Authentication, baseURI string, userAgent []string) (*Client, error) {
     resource, err := getResourceForToken(auth, baseURI)
     if err != nil {
         return nil, fmt.Errorf("Getting resource for token failed: %v", err)
@@ -52,9 +52,16 @@ func NewClient(auth *Authentication, baseURI string, userAgent string) (*Client,
 
     client.BearerAuthorizer = &BearerAuthorizer{tokenProvider: tp}
 
+    nonEmptyUserAgent := userAgent[:0]
+    for _, ua := range userAgent {
+        if ua != "" {
+            nonEmptyUserAgent = append(nonEmptyUserAgent, ua)
+        }
+    }
+
     uat := userAgentTransport{
         base: http.DefaultTransport,
-        userAgent: userAgent,
+        userAgent: nonEmptyUserAgent,
         client: client,
     }
 
@@ -77,7 +84,7 @@ func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error)
     }
 
     // Add the user agent header.
-    newReq.Header["User-Agent"] = []string{t.userAgent}
+    newReq.Header["User-Agent"] = []string{strings.Join(t.userAgent, " ")}
 
     // Add the content-type header.
     newReq.Header["Content-Type"] = []string{"application/json"}
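Why the empty-string filter matters: the extra user agent is optional, and joining a slice that still contains an empty element would leave a stray trailing space in the header. A runnable sketch of the behaviour added above; the helper name is illustrative, the filter and join logic mirror the diff:

```go
package main

import (
	"fmt"
	"strings"
)

// joinUserAgent reproduces the filter-then-join behaviour added above:
// empty entries are dropped in place (reusing the slice's backing array)
// before the parts are joined into a single User-Agent header value.
func joinUserAgent(userAgent []string) string {
	nonEmpty := userAgent[:0]
	for _, ua := range userAgent {
		if ua != "" {
			nonEmpty = append(nonEmpty, ua)
		}
	}
	return strings.Join(nonEmpty, " ")
}

func main() {
	fmt.Printf("%q\n", joinUserAgent([]string{"virtual-kubelet/azure-arm-aci/2018-09-01", ""}))
	// "virtual-kubelet/azure-arm-aci/2018-09-01"
	fmt.Printf("%q\n", joinUserAgent([]string{"virtual-kubelet/azure-arm-aci/2018-09-01", "helm-chart/aks/virtual-kubelet-for-aks/0.1.9"}))
	// "virtual-kubelet/azure-arm-aci/2018-09-01 helm-chart/aks/virtual-kubelet-for-aks/0.1.9"
}
```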
@@ -11,9 +11,9 @@ import (
 )
 
 const (
     baseURI = "https://management.azure.com"
-    userAgent = "virtual-kubelet/azure-arm-networking/2018-07-01"
-    apiVersion = "2018-07-01"
+    defaultUserAgent = "virtual-kubelet/azure-arm-network/2018-08-01"
+    apiVersion = "2018-08-01"
 )
 
 // Client is a client for interacting with Azure networking
@@ -25,11 +25,16 @@ type Client struct {
 }
 
 // NewClient creates a new client for interacting with azure networking
-func NewClient(azAuth *azure.Authentication) (*Client, error) {
+func NewClient(azAuth *azure.Authentication, extraUserAgent string) (*Client, error) {
     if azAuth == nil {
         return nil, fmt.Errorf("Authentication is not supplied for the Azure client")
     }
 
+    userAgent := []string{defaultUserAgent}
+    if extraUserAgent != "" {
+        userAgent = append(userAgent, extraUserAgent)
+    }
+
     client, err := azure.NewClient(azAuth, baseURI, userAgent)
     if err != nil {
         return nil, fmt.Errorf("Creating Azure client failed: %v", err)
@@ -27,7 +27,7 @@ func TestMain(m *testing.M) {
         os.Exit(1)
     }
 
-    c, err := resourcegroups.NewClient(testAuth)
+    c, err := resourcegroups.NewClient(testAuth, "unit-test")
     if err != nil {
         os.Exit(1)
     }
@@ -68,7 +68,7 @@ func newTestClient(t *testing.T) *Client {
     if err := setupAuth(); err != nil {
         t.Fatal(err)
     }
-    c, err := NewClient(testAuth)
+    c, err := NewClient(testAuth, "unit-test")
     if err != nil {
         t.Fatal(err)
     }
@@ -9,9 +9,9 @@ import (
 
 const (
     // BaseURI is the default URI used for compute services.
    BaseURI = "https://management.azure.com"
-    userAgent = "virtual-kubelet/azure-arm-resourcegroups/2017-12-01"
+    defaultUserAgent = "virtual-kubelet/azure-arm-resourcegroups/2017-12-01"
     apiVersion = "2017-08-01"
 
     resourceGroupURLPath = "subscriptions/{{.subscriptionId}}/resourcegroups/{{.resourceGroupName}}"
 )
@@ -26,11 +26,16 @@ type Client struct {
 }
 
 // NewClient creates a new Azure resource groups client.
-func NewClient(auth *azure.Authentication) (*Client, error) {
+func NewClient(auth *azure.Authentication, extraUserAgent string) (*Client, error) {
     if auth == nil {
         return nil, fmt.Errorf("Authentication is not supplied for the Azure client")
     }
 
+    userAgent := []string{defaultUserAgent}
+    if extraUserAgent != "" {
+        userAgent = append(userAgent, extraUserAgent)
+    }
+
     client, err := azure.NewClient(auth, BaseURI, userAgent)
     if err != nil {
         return nil, fmt.Errorf("Creating Azure client failed: %v", err)
@@ -25,7 +25,7 @@ func TestNewClient(t *testing.T) {
         t.Fatalf("Failed to load Azure authentication file: %v", err)
     }
 
-    c, err := NewClient(auth)
+    c, err := NewClient(auth, "unit-test")
     if err != nil {
         t.Fatal(err)
     }
@@ -1,4 +1,4 @@
-// +build !no_alicooud_provider
+// +build !no_alicloud_provider
 
 package register
 
@@ -30,11 +30,10 @@ func (s *Server) onAddPod(ctx context.Context, obj interface{}) {
     defer span.End()
     logger := log.G(ctx).WithField("method", "onAddPod")
 
-    pod := obj.(*corev1.Pod)
-    if pod == nil {
-        span.SetStatus(trace.Status{Code: trace.StatusCodeInvalidArgument, Message: fmt.Sprintf("Unexpected object from event: %v", obj)})
-        logger.Errorf("obj is not a valid pod: %v", obj)
+    pod, ok := obj.(*corev1.Pod)
+    if !ok {
+        span.SetStatus(trace.Status{Code: trace.StatusCodeInvalidArgument, Message: fmt.Sprintf("Unexpected object from event: %T", obj)})
+        logger.Errorf("obj is not of a valid type: %T", obj)
         return
     }
 
@@ -53,11 +52,10 @@ func (s *Server) onUpdatePod(ctx context.Context, obj interface{}) {
     defer span.End()
     logger := log.G(ctx).WithField("method", "onUpdatePod")
 
-    pod := obj.(*corev1.Pod)
-    if pod == nil {
-        span.SetStatus(trace.Status{Code: trace.StatusCodeInvalidArgument, Message: fmt.Sprintf("Unexpected object from event: %v", obj)})
-        logger.Errorf("obj is not a valid pod: %v", obj)
+    pod, ok := obj.(*corev1.Pod)
+    if !ok {
+        span.SetStatus(trace.Status{Code: trace.StatusCodeInvalidArgument, Message: fmt.Sprintf("Unexpected object from event: %T", obj)})
+        logger.Errorf("obj is not of a valid type: %T", obj)
         return
     }
 
@@ -76,12 +74,20 @@ func (s *Server) onDeletePod(ctx context.Context, obj interface{}) {
     defer span.End()
     logger := log.G(ctx).WithField("method", "onDeletePod")
 
-    pod := obj.(*corev1.Pod)
-    if pod == nil {
-        span.SetStatus(trace.Status{Code: trace.StatusCodeInvalidArgument, Message: fmt.Sprintf("Unexpected object from event: %v", obj)})
-        logger.Errorf("obj is not a valid pod: %v", obj)
-        return
+    pod, ok := obj.(*corev1.Pod)
+    if !ok {
+        delta, ok := obj.(cache.DeletedFinalStateUnknown)
+        if !ok {
+            span.SetStatus(trace.Status{Code: trace.StatusCodeInvalidArgument, Message: fmt.Sprintf("Unexpected object from event: %T", obj)})
+            logger.Errorf("obj is not of a valid type: %T", obj)
+            return
+        }
+
+        if pod, ok = delta.Obj.(*corev1.Pod); !ok {
+            span.SetStatus(trace.Status{Code: trace.StatusCodeInvalidArgument, Message: fmt.Sprintf("Unexpected object from event: %T", obj)})
+            logger.Errorf("obj is not of a valid type: %T", obj)
+            return
+        }
     }
 
     addPodAttributes(span, pod)
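For context on the onDeletePod change: when an informer's watch is interrupted, client-go can deliver a `cache.DeletedFinalStateUnknown` tombstone instead of the deleted object itself, so delete handlers need the two-step unwrap shown above. A minimal, hedged sketch of the same pattern outside the virtual-kubelet codebase; the helper name and sample values are illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/cache"
)

// podFromDeleteEvent unwraps a delete event the same way onDeletePod now does:
// try the pod directly, then fall back to the tombstone's stored object.
func podFromDeleteEvent(obj interface{}) (*corev1.Pod, bool) {
	if pod, ok := obj.(*corev1.Pod); ok {
		return pod, true
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, false
	}
	pod, ok := tombstone.Obj.(*corev1.Pod)
	return pod, ok
}

func main() {
	pod := &corev1.Pod{}
	if p, ok := podFromDeleteEvent(cache.DeletedFinalStateUnknown{Key: "default/demo", Obj: pod}); ok {
		fmt.Println("recovered pod from tombstone:", p != nil)
	}
}
```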