Merge branch 'master' into 1_worker_default

This commit is contained in:
Jeremy Rickard
2018-11-02 13:37:06 -06:00
committed by GitHub
20 changed files with 260 additions and 86 deletions

View File

@@ -53,11 +53,12 @@ a `virtual-kubelet` node.
## Current Features
- create, delete and update pods
- container logs
- container logs, exec, and metrics
- get pod, pods and pod status
- capacity
- node addresses, node capacity, node daemon endpoints
- operating system
- bring your own virtual network
## Command-Line Usage
@@ -229,7 +230,7 @@ You can generate this file by following the instructions listed in the
### Missing Load Balancer IP addresses for services
#### When Virtual Kubelet is installed on a cluster, you cannot create external-IPs for a Service
#### Providers that do not support service discovery
Kubernetes 1.9 introduces a new flag, `ServiceNodeExclusion`, for the control plane's Controller Manager. Enabling this flag in the Controller Manager's manifest allows Kubernetes to exclude Virtual Kubelet nodes from being added to Load Balancer pools, allowing you to create public facing services with external IPs without issue.

Binary file not shown.

Binary file not shown.

View File

@@ -1,5 +1,5 @@
name: virtual-kubelet-for-aks
version: 0.1.8
version: 0.1.9
description: a Helm chart to install virtual kubelet in an AKS or ACS cluster.
sources:
- https://github.com/virtual-kubelet/virtual-kubelet

View File

@@ -41,6 +41,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: ACI_EXTRA_USER_AGENT
value: {{ printf "helm-chart/aks/%s/%s" .Chart.Name .Chart.Version }}
- name: ACI_SUBNET_NAME
value: {{ .Values.env.aciVnetSubnetName }}
- name: ACI_SUBNET_CIDR

Binary file not shown.

View File

@@ -1,6 +1,6 @@
name: virtual-kubelet
version: 0.4.0
appVersion: 0.4
version: 0.5.0
appVersion: 0.5
description: A Helm chart to install virtual kubelet inside a Kubernetes cluster.
icon: https://avatars2.githubusercontent.com/u/34250142
sources:

View File

@@ -60,6 +60,8 @@ spec:
value: {{ .aciResourceGroup }}
- name: ACI_REGION
value: {{ .aciRegion }}
- name: ACI_EXTRA_USER_AGENT
value: {{ printf "helm-chart/aks/%s/%s" $.Chart.Name $.Chart.Version }}
{{- else }}
- name: AZURE_AUTH_LOCATION
value: /etc/virtual-kubelet/credentials.json
@@ -67,6 +69,8 @@ spec:
value: {{ required "aciResourceGroup is required" .aciResourceGroup }}
- name: ACI_REGION
value: {{ required "aciRegion is required" .aciRegion }}
- name: ACI_EXTRA_USER_AGENT
value: {{ printf "helm-chart/other/%s/%s" $.Chart.Name $.Chart.Version }}
{{- end }}
{{- if .vnet.enabled }}
- name: ACI_SUBNET_NAME

View File

@@ -178,7 +178,8 @@ func (p *ECIProvider) CreatePod(ctx context.Context, pod *v1.Pod) error {
request.RestartPolicy = string(pod.Spec.RestartPolicy)
// get containers
containers, err := p.getContainers(pod)
containers, err := p.getContainers(pod, false)
initContainers, err := p.getContainers(pod, true)
if err != nil {
return err
}
@@ -197,6 +198,7 @@ func (p *ECIProvider) CreatePod(ctx context.Context, pod *v1.Pod) error {
// assign all the things
request.Containers = containers
request.InitContainers = initContainers
request.Volumes = volumes
request.ImageRegistryCredentials = creds
CreationTimestamp := pod.CreationTimestamp.UTC().Format(podTagTimeFormat)
@@ -539,9 +541,13 @@ func readDockerConfigJSONSecret(secret *v1.Secret, ips []eci.ImageRegistryCreden
return ips, err
}
func (p *ECIProvider) getContainers(pod *v1.Pod) ([]eci.CreateContainer, error) {
containers := make([]eci.CreateContainer, 0, len(pod.Spec.Containers))
for _, container := range pod.Spec.Containers {
func (p *ECIProvider) getContainers(pod *v1.Pod, init bool) ([]eci.CreateContainer, error) {
podContainers := pod.Spec.Containers
if init {
podContainers = pod.Spec.InitContainers
}
containers := make([]eci.CreateContainer, 0, len(podContainers))
for _, container := range podContainers {
c := eci.CreateContainer{
Name: container.Name,
Image: container.Image,
@@ -646,9 +652,9 @@ func (p *ECIProvider) getVolumes(pod *v1.Pod) ([]eci.Volume, error) {
if len(ConfigFileToPaths) != 0 {
volumes = append(volumes, eci.Volume{
Type: eci.VOL_TYPE_CONFIGFILEVOLUME,
Name: v.Name,
ConfigFileVolumeConfigFileToPaths: ConfigFileToPaths,
Type: eci.VOL_TYPE_CONFIGFILEVOLUME,
Name: v.Name,
ConfigFileToPaths: ConfigFileToPaths,
})
}
continue
@@ -672,9 +678,9 @@ func (p *ECIProvider) getVolumes(pod *v1.Pod) ([]eci.Volume, error) {
if len(ConfigFileToPaths) != 0 {
volumes = append(volumes, eci.Volume{
Type: eci.VOL_TYPE_CONFIGFILEVOLUME,
Name: v.Name,
ConfigFileVolumeConfigFileToPaths: ConfigFileToPaths,
Type: eci.VOL_TYPE_CONFIGFILEVOLUME,
Name: v.Name,
ConfigFileToPaths: ConfigFileToPaths,
})
}
continue

View File

@@ -77,6 +77,7 @@ func (client *Client) CreateContainerGroupWithCallback(request *CreateContainerG
type CreateContainerGroupRequest struct {
*requests.RpcRequest
Containers []CreateContainer `position:"Query" name:"Container" type:"Repeated"`
InitContainers []CreateContainer `position:"Query" name:"InitContainer" type:"Repeated"`
ResourceOwnerId requests.Integer `position:"Query" name:"ResourceOwnerId"`
SecurityGroupId string `position:"Query" name:"SecurityGroupId"`
ImageRegistryCredentials []ImageRegistryCredential `position:"Query" name:"ImageRegistryCredential" type:"Repeated"`
@@ -92,17 +93,17 @@ type CreateContainerGroupRequest struct {
}
type CreateContainer struct {
Name string `position:"Query" name:"Name"`
Image string `position:"Query" name:"Image"`
Memory requests.Float `position:"Query" name:"Memory"`
Cpu requests.Float `position:"Query" name:"Cpu"`
WorkingDir string `position:"Query" name:"WorkingDir"`
ImagePullPolicy string `position:"Query" name:"ImagePullPolicy"`
Commands []string `position:"Query" name:"Command" type:"Repeated"`
Args []string `position:"Query" name:"Arg" type:"Repeated"`
VolumeMounts []VolumeMount `position:"Query" name:"VolumeMount" type:"Repeated"`
Ports []ContainerPort `position:"Query" name:"Port" type:"Repeated"`
EnvironmentVars []EnvironmentVar `position:"Query" name:"EnvironmentVar" type:"Repeated"`
Name string `name:"Name"`
Image string `name:"Image"`
Memory requests.Float `name:"Memory"`
Cpu requests.Float `name:"Cpu"`
WorkingDir string `name:"WorkingDir"`
ImagePullPolicy string `name:"ImagePullPolicy"`
Commands []string `name:"Command" type:"Repeated"`
Args []string `name:"Arg" type:"Repeated"`
VolumeMounts []VolumeMount `name:"VolumeMount" type:"Repeated"`
Ports []ContainerPort `name:"Port" type:"Repeated"`
EnvironmentVars []EnvironmentVar `name:"EnvironmentVar" type:"Repeated"`
}
// CreateContainerGroupImageRegistryCredential is a repeated param struct in CreateContainerGroupRequest
@@ -115,8 +116,8 @@ type ImageRegistryCredential struct {
// CreateContainerGroupResponse is the response struct for api CreateContainerGroup
type CreateContainerGroupResponse struct {
*responses.BaseResponse
RequestId string `json:"RequestId" xml:"RequestId"`
ContainerGroupId string `json:"ContainerGroupId" xml:"ContainerGroupId"`
RequestId string
ContainerGroupId string
}
// CreateCreateContainerGroupRequest creates a request to invoke CreateContainerGroup API

View File

@@ -17,6 +17,6 @@ package eci
// ConfigFileVolumeConfigFileToPath is a nested struct in eci response
type ConfigFileToPath struct {
Content string `json:"Content" xml:"Content" position:"Query" name:"Content"`
Path string `json:"Path" xml:"Path" position:"Query" name:"Path"`
Content string `name:"Content"`
Path string `name:"Path"`
}

View File

@@ -17,18 +17,18 @@ package eci
// Container is a nested struct in eci response
type Container struct {
Name string `json:"Name" xml:"Name" position:"Query" name:"Name"`
Image string `json:"Image" xml:"Image" position:"Query" name:"Image"`
Memory float64 `json:"Memory" xml:"Memory" position:"Query" name:"Memory"`
Cpu float64 `json:"Cpu" xml:"Cpu" position:"Query" name:"Cpu" `
Name string `json:"Name" xml:"Name" `
Image string `json:"Image" xml:"Image"`
Memory float64 `json:"Memory" xml:"Memory"`
Cpu float64 `json:"Cpu" xml:"Cpu"`
RestartCount int `json:"RestartCount" xml:"RestartCount"`
WorkingDir string `json:"WorkingDir" xml:"WorkingDir" position:"Query" name:"WorkingDir"`
ImagePullPolicy string `json:"ImagePullPolicy" xml:"ImagePullPolicy" position:"Query" name:"ImagePullPolicy"`
Commands []string `json:"Commands" xml:"Commands" position:"Query" name:"Command" type:"Repeated"`
Args []string `json:"Args" xml:"Args" position:"Query" name:"Arg" type:"Repeated"`
WorkingDir string `json:"WorkingDir" xml:"WorkingDir"`
ImagePullPolicy string `json:"ImagePullPolicy" xml:"ImagePullPolicy"`
Commands []string `json:"Commands" xml:"Commands"`
Args []string `json:"Args" xml:"Args"`
PreviousState ContainerState `json:"PreviousState" xml:"PreviousState"`
CurrentState ContainerState `json:"CurrentState" xml:"CurrentState"`
VolumeMounts []VolumeMount `json:"VolumeMounts" xml:"VolumeMounts" position:"Query" name:"VolumeMount" type:"Repeated"`
Ports []ContainerPort `json:"Ports" xml:"Ports" position:"Query" name:"Port" type:"Repeated"`
EnvironmentVars []EnvironmentVar `json:"EnvironmentVars" xml:"EnvironmentVars" position:"Query" name:"EnvironmentVar" type:"Repeated"`
VolumeMounts []VolumeMount `json:"VolumeMounts" xml:"VolumeMounts"`
Ports []ContainerPort `json:"Ports" xml:"Ports"`
EnvironmentVars []EnvironmentVar `json:"EnvironmentVars" xml:"EnvironmentVars"`
}

View File

@@ -21,6 +21,6 @@ import (
// ContainerPort is a nested struct in eci response
type ContainerPort struct {
Port requests.Integer `json:"Port" xml:"Port" position:"Query" name:"Port"`
Protocol string `json:"Protocol" xml:"Protocol" position:"Query" name:"Protocol"`
Port requests.Integer `name:"Port"`
Protocol string `name:"Protocol"`
}

View File

@@ -17,6 +17,6 @@ package eci
// EnvironmentVar is a nested struct in eci response
type EnvironmentVar struct {
Key string `json:"Key" xml:"Key" position:"Query" name:"Key"`
Value string `json:"Value" xml:"Value" position:"Query" name:"Value"`
Key string `name:"Key"`
Value string `name:"Value"`
}

View File

@@ -17,6 +17,6 @@ package eci
// Label is a nested struct in eci response
type Tag struct {
Key string `json:"Key" xml:"Key" position:"Query" name:"Key"`
Value string `json:"Value" xml:"Value" position:"Query" name:"Value"`
Key string `name:"Key"`
Value string `name:"Value"`
}

View File

@@ -25,11 +25,11 @@ const (
)
type Volume struct {
Type string `json:"Type" xml:"Type" position:"Query" name:"Type"`
Name string `json:"Name" xml:"Name" position:"Query" name:"Name"`
NfsVolumePath string `json:"NfsVolumePath" xml:"NfsVolumePath" position:"Query" name:"NFSVolume.Path"`
NfsVolumeServer string `json:"NfsVolumeServer" xml:"NfsVolumeServer" position:"Query" name:"NFSVolume.Server"`
NfsVolumeReadOnly requests.Boolean `json:"NfsVolumeReadOnly" xml:"NfsVolumeReadOnly" position:"Query" name:"NFSVolume.ReadOnly"`
EmptyDirVolumeEnable requests.Boolean `json:"EmptyDirVolumeEnable" xml:"EmptyDirVolumeEnable" position:"Query" name:"EmptyDirVolume.Enable"`
ConfigFileVolumeConfigFileToPaths []ConfigFileToPath `json:"ConfigFileVolumeConfigFileToPaths" xml:"ConfigFileVolume" position:"Query" name:"ConfigFileVolume.ConfigFileToPath" type:"Repeated"`
Type string `name:"Type"`
Name string `name:"Name"`
NfsVolumePath string `name:"NFSVolume.Path"`
NfsVolumeServer string `name:"NFSVolume.Server"`
NfsVolumeReadOnly requests.Boolean `name:"NFSVolume.ReadOnly"`
EmptyDirVolumeEnable requests.Boolean `name:"EmptyDirVolume.Enable"`
ConfigFileToPaths []ConfigFileToPath `name:"ConfigFileVolume.ConfigFileToPath" type:"Repeated"`
}

View File

@@ -19,7 +19,7 @@ import "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
// VolumeMount is a nested struct in eci response
type VolumeMount struct {
MountPath string `json:"MountPath" xml:"MountPath" position:"Query" name:"MountPath"`
ReadOnly requests.Boolean `json:"ReadOnly" xml:"ReadOnly" position:"Query" name:"ReadOnly"`
Name string `json:"Name" xml:"Name" position:"Query" name:"Name"`
MountPath string `name:"MountPath"`
ReadOnly requests.Boolean `name:"ReadOnly"`
Name string `name:"Name"`
}

View File

@@ -5,16 +5,19 @@ Azure Container Instances (ACI) provide a hosted environment for running contain
The Azure Container Instances provider for the Virtual Kubelet configures an ACI instance as a node in any Kubernetes cluster. When using the Virtual Kubelet ACI provider, pods can be scheduled on an ACI instance as if the ACI instance is a standard Kubernetes node. This configuration allows you to take advantage of both the capabilities of Kubernetes and the management value and cost benefit of ACI.
This document details configuring the Virtual Kubelet ACI provider.
#### Table of Contents
* [Prerequisite](#prerequisite)
* [Quick set-up with the ACI Connector](#quick-set-up-with-the-aci-connector)
* [Manual set-up](#manual-set-up)
* [Create a cluster with a Virtual Network](#create-an-aks-cluster-with-vnet)
* [Validate the Virtual Kubelet ACI provider](#validate-the-virtual-kubelet-aci-provider)
* [Schedule a pod in ACI](#schedule-a-pod-in-aci)
* [Work arounds](#work-arounds-for-the-aci-connector)
* [Upgrade the ACI Connector ](#upgrade-the-aci-connector)
* [Remove the Virtual Kubelet](#remove-the-virtual-kubelet)
## Prerequisite
This guide assumes that you have a Kubernetes cluster up and running (can be `minikube`) and that `kubectl` is already configured to talk to it.
@@ -120,7 +123,7 @@ First let's identify your Azure subscription and save it for use later on in the
## Quick set-up with the ACI Connector
The Azure cli can be used to install the ACI provider. We like to say Azure's provider or implementation for Virtual Kubelet is the ACI Connector.
The Azure cli can be used to install the ACI provider. We like to say Azure's provider or implementation for Virtual Kubelet is the ACI Connector. Please note that this command has no Virtual Networking support.
For this section Virtual Kubelet's specific ACI provider will be referenced as the ACI Connector.
If you continue with this section you can skip sections below up to "Schedule a pod in ACI", as we use Azure Container Service (AKS) to easily deploy and install the connector, thus it is assumed
that you've created an [AKS cluster](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough).
@@ -164,10 +167,10 @@ az group create --name aci-group --location "$ACI_REGION"
export AZURE_RG=aci-group
```
### Create a service principal
### Create a service principal
This creates an identity for the Virtual Kubelet ACI provider to use when provisioning
resources on your account on behalf of Kubernetes.
resources on your account on behalf of Kubernetes. This step is optional if you are provisioning Virtual Kubelet on AKS.
1. Create a service principal with RBAC enabled for the quickstart:
```cli
@@ -194,7 +197,7 @@ resources on your account on behalf of Kubernetes.
Run these commands to deploy the virtual kubelet which connects your Kubernetes cluster to Azure Container Instances.
```cli
export VK_RELEASE=virtual-kubelet-0.2.0
export VK_RELEASE=virtual-kubelet-latest
```
If your cluster is an AKS cluster:
@@ -203,18 +206,9 @@ RELEASE_NAME=virtual-kubelet
NODE_NAME=virtual-kubelet
CHART_URL=https://github.com/virtual-kubelet/virtual-kubelet/raw/master/charts/$VK_RELEASE.tgz
curl https://raw.githubusercontent.com/virtual-kubelet/virtual-kubelet/master/scripts/createCertAndKey.sh > createCertAndKey.sh
chmod +x createCertAndKey.sh
. ./createCertAndKey.sh
helm install "$CHART_URL" --name "$RELEASE_NAME" \
--set provider=azure \
--set providers.azure.targetAKS=true \
--set providers.azure.tenantId=$AZURE_TENANT_ID \
--set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \
--set providers.azure.clientId=$AZURE_CLIENT_ID \
--set apiserverCert=$cert \
--set apiserverKey=$key
```
For any other type of Kubernetes cluster:
@@ -223,22 +217,16 @@ RELEASE_NAME=virtual-kubelet
NODE_NAME=virtual-kubelet
CHART_URL=https://github.com/virtual-kubelet/virtual-kubelet/raw/master/charts/$VK_RELEASE.tgz
curl https://raw.githubusercontent.com/virtual-kubelet/virtual-kubelet/master/scripts/createCertAndKey.sh > createCertAndKey.sh
chmod +x createCertAndKey.sh
. ./createCertAndKey.sh
helm install "$CHART_URL" --name "$RELEASE_NAME" \
--set provider=azure \
--set rbac.install=true \
--set providers.azure.targetAKS=false \
--set providers.azure.aciResourceGroup=$AZURE_RG \
--set providers.azure.aciRegion=$ACI_REGION \
--set providers.azure.tenantId=$AZURE_TENANT_ID \
--set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \
--set providers.azure.clientId=$AZURE_CLIENT_ID \
--set providers.azure.clientKey=$AZURE_CLIENT_SECRET \
--set providers.azure.aciResourceGroup=$AZURE_RG \
--set providers.azure.aciRegion=$ACI_REGION \
--set apiserverCert=$cert \
--set apiserverKey=$key
--set providers.azure.clientKey=$AZURE_CLIENT_SECRET
```
If your cluster has RBAC enabled set ```rbac.install=true```
@@ -273,6 +261,168 @@ To verify that virtual kubelet has started, run:
```cli
kubectl --namespace=default get pods -l "app=virtual-kubelet-virtual-kubelet"
```
## Create an AKS cluster with VNet
Run the following commands to create an AKS cluster with a new Azure virtual network. Also, create two subnets. One will be delegated to the cluster and the other will be delegated to Azure Container Instances.
### Create an Azure virtual network and subnets
First, set the following variables for your VNet range and two subnet ranges within that VNet. The following ranges are recommended for those just trying out the connector with VNet.
**Bash**
```cli
export VNET_RANGE=10.0.0.0/8
export CLUSTER_SUBNET_RANGE=10.240.0.0/16
export ACI_SUBNET_RANGE=10.241.0.0/16
export VNET_NAME=myAKSVNet
export CLUSTER_SUBNET_NAME=myAKSSubnet
export ACI_SUBNET_NAME=myACISubnet
export AKS_CLUSTER_RG=myresourcegroup
export KUBE_DNS_IP=10.0.0.10
```
Run the following command to create a virtual network within Azure, and a subnet within that VNet. The subnet will be dedicated to the nodes in the AKS cluster.
```cli
az network vnet create \
--resource-group $AKS_CLUSTER_RG \
--name $VNET_NAME \
--address-prefixes $VNET_RANGE \
--subnet-name $CLUSTER_SUBNET_NAME \
--subnet-prefix $CLUSTER_SUBNET_RANGE
```
Create a subnet that will be delegated to just resources within ACI, note that this needs to be an empty subnet, but within the same VNet that you already created.
```cli
az network vnet subnet create \
--resource-group $AKS_CLUSTER_RG \
--vnet-name $VNET_NAME \
--name $ACI_SUBNET_NAME \
--address-prefix $ACI_SUBNET_RANGE
```
### Create a service principal (OPTIONAL)
Create an Azure Active Directory service principal to allow AKS to interact with other Azure resources. You can use a pre-created service principal too.
```cli
az ad sp create-for-rbac -n "virtual-kubelet-sp" --skip-assignment
```
The output should look similar to the following.
```console
{
"appId": "bef76eb3-d743-4a97-9534-03e9388811fc",
"displayName": "azure-cli-2018-08-29-22-29-29",
"name": "http://azure-cli-2018-08-29-22-29-29",
"password": "1d257915-8714-4ce7-xxxxxxxxxxxxx",
"tenant": "72f988bf-86f1-41af-91ab-2d7cd011db48"
}
```
Save the values from the command output in environment variables.
```cli
export AZURE_TENANT_ID=<Tenant>
export AZURE_CLIENT_ID=<AppId>
export AZURE_CLIENT_SECRET=<Password>
```
These values can be integrated into the `az aks create` as a field ` --service-principal $AZURE_CLIENT_ID \`.
### Integrating Azure VNet Resource
If you want to integrate an already created Azure VNet resource with your AKS cluster, then follow these steps.
Grab the virtual network resource id with the following command:
```cli
az network vnet show --resource-group $AKS_CLUSTER_RG --name $VNET_NAME --query id -o tsv
```
Grant access to the AKS cluster to use the virtual network by creating a role and assigning it.
```cli
az role assignment create --assignee $AZURE_CLIENT_ID --scope <vnetId> --role NetworkContributor
```
### Create an AKS cluster with a virtual network
Grab the id of the cluster subnet you created earlier with the following command.
```cli
az network vnet subnet show --resource-group $AKS_CLUSTER_RG --vnet-name $VNET_NAME --name $CLUSTER_SUBNET_NAME --query id -o tsv
```
Save the entire output starting with "/subscriptions/..." in the following environment variable.
```cli
export VNET_SUBNET_ID=<subnet-resource>
```
Use the following command to create an AKS cluster with the virtual network you've already created.
```cli
az aks create \
--resource-group myResourceGroup \
--name myAKSCluster \
--node-count 1 \
--network-plugin azure \
--service-cidr 10.0.0.0/16 \
--dns-service-ip $KUBE_DNS_IP \
--docker-bridge-address 172.17.0.1/16 \
--vnet-subnet-id $VNET_SUBNET_ID \
--client-secret $AZURE_CLIENT_SECRET
```
### Deploy Virtual Kubelet
Manually deploy the Virtual Kubelet, the following env. variables have already been set earlier. You do need to pass through the subnet you created for ACI earlier, otherwise the container instances will not be able to participate with the other pods within the cluster subnet.
Grab the public master URI for your Kubernetes cluster and save the value.
```cli
kubectl cluster-info
export MASTER_URI=<public uri>
```
Set the following values for the helm chart.
```cli
RELEASE_NAME=virtual-kubelet
NODE_NAME=virtual-kubelet
CHART_URL=https://github.com/virtual-kubelet/virtual-kubelet/raw/master/charts/$VK_RELEASE.tgz
```
If your cluster is an AKS cluster:
```cli
helm install "$CHART_URL" --name "$RELEASE_NAME" \
--set provider=azure \
--set providers.azure.targetAKS=true \
--set providers.azure.vnet.enabled=true \
--set providers.azure.vnet.subnetName=$ACI_SUBNET_NAME \
--set providers.azure.vnet.subnetCidr=$ACI_SUBNET_RANGE \
--set providers.azure.vnet.clusterCidr=$CLUSTER_SUBNET_RANGE \
--set providers.azure.vnet.kubeDnsIp=$KUBE_DNS_IP \
--set providers.azure.masterUri=$MASTER_URI
```
For any other type of cluster:
```cli
helm install "$CHART_URL" --name "$RELEASE_NAME" \
--set provider=azure \
--set providers.azure.targetAKS=false \
--set providers.azure.vnet.enabled=true \
--set providers.azure.vnet.subnetName=$ACI_SUBNET_NAME \
--set providers.azure.vnet.subnetCidr=$ACI_SUBNET_RANGE \
--set providers.azure.vnet.kubeDnsIp=$KUBE_DNS_IP \
--set providers.azure.tenantId=$AZURE_TENANT_ID \
--set providers.azure.subscriptionId=$AZURE_SUBSCRIPTION_ID \
--set providers.azure.aciResourceGroup=$AZURE_RG \
--set providers.azure.aciRegion=$ACI_REGION \
--set providers.azure.masterUri=$MASTER_URI
```
## Validate the Virtual Kubelet ACI provider
@@ -294,7 +444,7 @@ aks-nodepool1-39289454-2 Ready agent 22h v1.7.7
## Schedule a pod in ACI
Create a file named `virtual-kubelet-test.yaml` and copy in the following YAML. Replace the `nodeName` value with the name given to the virtual kubelet node.
Create a file named `virtual-kubelet-test.yaml` and copy in the following YAML.
```yaml
apiVersion: v1
@@ -328,9 +478,13 @@ spec:
effect: NoSchedule
```
Notice that Virtual-Kubelet nodes are tainted by default to avoid unexpected pods running on them, i.e. kube-proxy, other virtual-kubelet pods, etc. To schedule a pod to them, you need to add the tolerations to your pod spec:
Notice that Virtual-Kubelet nodes are tainted by default to avoid unexpected pods running on them, i.e. kube-proxy, other virtual-kubelet pods, etc. To schedule a pod to them, you need to add the toleration to the pod spec and a node selector:
```
nodeSelector:
kubernetes.io/role: agent
beta.kubernetes.io/os: linux
type: virtual-kubelet
tolerations:
- key: virtual-kubelet.io/provider
operator: Exists
@@ -365,6 +519,12 @@ Notice that the `helloworld` pod is running on the `virtual-kubelet` node.
NAME READY STATUS RESTARTS AGE IP NODE
aci-helloworld-2559879000-8vmjw 1/1 Running 0 39s 52.179.3.180 virtual-kubelet
```
If the AKS cluster was configured with a virtual network, then the output will look like the following. The container instance will get a private ip rather than a public one.
```console
NAME READY STATUS RESTARTS AGE IP NODE
aci-helloworld-9b55975f-bnmfl 1/1 Running 0 4m 10.241.0.4 virtual-kubelet
```
To validate that the container is running in an Azure Container Instance, use the [az container list][az-container-list] Azure CLI command.
@@ -448,7 +608,7 @@ ERROR: logging before flag.Parse: E0914 00:02:01.546132 1 streamwatcher.go
time="2018-09-14T00:02:01Z" level=error msg="Pod watcher connection is closed unexpectedly" namespace= node=virtual-kubelet-myconnector-linux operatingSystem=Linux provider=azure
```
Then copy the master URL with cluster-info.
Then copy the master URI with cluster-info.
```cli
kubectl cluster-info
@@ -480,7 +640,7 @@ Edit the deployment.
kubectl edit deploy virtual-kubelet-virtual-kubelet
```
Add the following name and value to the deployment in the enviorment section. Use your copied master URL.
Add the following name and value to the deployment in the environment section. Use your copied AKS master URI.
```yaml
--name: MASTER_URI
@@ -535,7 +695,7 @@ If you've installed Virtual Kubelet with the Azure cli so you're using the ACI C
Run the following command to upgrade your ACI Connector.
```cli
az aks upgrade-connector --resource-group <aks cluster rg> --name <aks cluster name> --connector-name myconnector --os-type linux
az aks upgrade-connector --resource-group <aks cluster rg> --name <aks cluster name> --connector-name virtual-kubelet --os-type linux
```
## Remove the Virtual Kubelet
@@ -548,7 +708,7 @@ helm delete virtual-kubelet --purge
If you used the ACI Connector installation then use the following command to remove the ACI Connector from your cluster.
```cli
az aks remove-connector --resource-group <aks cluster rg> --name <aks cluster name> --connector-name myconnector --os-type linux
az aks remove-connector --resource-group <aks cluster rg> --name <aks cluster name> --connector-name virtual-kubelet --os-type linux
```
<!-- LINKS -->

View File

@@ -1,4 +1,4 @@
// +build !no_alicooud_provider
// +build !no_alicloud_provider
package register