Compare commits


5 Commits

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| Robbie Zhang | 3670d9fa20 | Update virtual-kubelet-for-aks Helm Chart to Use 'latest' Image (#104) | 2018-02-27 10:57:48 -08:00 |
| Ria Bhatia | 7e74e9c844 | Merge pull request #94 from avranju/add-readme: Add readme for web provider | 2018-02-26 18:40:13 -08:00 |
| Bhargav Nookala | 6f748f4375 | Adding support for ACI DNS name labels (#97) | 2018-02-23 14:06:23 -08:00 |
| lcastellano | d80dbbe561 | Provide method to correctly build Pod Key from names in the mock provider (#99) | 2018-02-23 11:31:00 -08:00 |
| Rajasekharan Vengalil | 2abccb0257 | Add readme for web provider; also rename sample web provider implemented in Rust to `web-rust` | 2018-02-11 15:20:44 +05:30 |
24 changed files with 259 additions and 41 deletions

View File

@@ -58,6 +58,7 @@ a `virtual-kubelet` node.
* Environment variables
* Public IPs
* kubectl logs
+* DNS name labels
## Current Limitations
@@ -116,6 +117,10 @@ You can find detailed instructions on how to set it up and how to test it in the
The Azure connector can use a configuration file specified by the `--provider-config` flag.
The config file is in TOML format, and an example lives in `providers/azure/example.toml`.
+#### More Details
+See the [ACI Readme](providers/azure/README.md)
### Hyper.sh Provider
The Hyper.sh Provider allows Kubernetes clusters to deploy Hyper.sh containers

View File

@@ -1,6 +1,6 @@
image:
  repository: microsoft/virtual-kubelet
-  tag: 0.2-beta-6
+  tag: latest
  pullPolicy: Always
env:
  azureClientId:

View File

@@ -284,15 +284,60 @@ Name ResourceGroup ProvisioningState Image
helloworld-2559879000-8vmjw myResourceGroup Succeeded microsoft/aci-helloworld 52.179.3.180:80 1.0 core/1.5 gb Linux eastus
```
### Schedule an ACI pod with a DNS Name label
Add an annotation to your Pod manifest: set the key `virtualkubelet.io/dnsnamelabel` to the DNS name label you'd like the Azure Container Instance to receive, then deploy the pod.
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: helloworld
  annotations:
    virtualkubelet.io/dnsnamelabel: "helloworld-aci"
spec:
  containers:
  - image: microsoft/aci-helloworld
    imagePullPolicy: Always
    name: helloworld
    resources:
      requests:
        memory: 1G
        cpu: 1
    ports:
    - containerPort: 80
      name: http
      protocol: TCP
    - containerPort: 443
      name: https
  dnsPolicy: ClusterFirst
  nodeName: virtual-kubelet
```
To confirm that the Azure Container Instance received and bound the specified DNS name, use the [az container show][az-container-show] Azure CLI command. Virtual Kubelet's naming
convention affects this query: the argument to `-n` takes the form `namespace-podName`. Unless specified otherwise, Kubernetes assumes
the namespace is `default`.
```azurecli-interactive
az container show -g myResourceGroup -n default-helloworld --query ipAddress.fqdn
```
Output:
```console
"helloworld-aci.westus.azurecontainer.io"
```
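The same naming convention applies in any other namespace. For example, for a hypothetical pod named `helloworld` deployed to a `prod` namespace, the query would become:
```azurecli-interactive
az container show -g myResourceGroup -n prod-helloworld --query ipAddress.fqdn
```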
## Remove the Virtual Kubelet
-You can remove your Virtual Kubelet node, you can delete the Helm deployment, by running the following command:
+You can remove your Virtual Kubelet node by deleting the Helm deployment. Run the following command:
```
helm delete virtual-kubelet --purge
```
<!-- LINKS -->
[kubectl-create]: https://kubernetes.io/docs/user-guide/kubectl/v1.6/#create
[kubectl-get]: https://kubernetes.io/docs/user-guide/kubectl/v1.8/#get
-[az-container-list]: https://docs.microsoft.com/en-us/cli/azure/aks?view=azure-cli-latest#az_aks_list
+[az-container-list]: https://docs.microsoft.com/en-us/cli/azure/container?view=azure-cli-latest#az_container_list
[az-container-show]: https://docs.microsoft.com/en-us/cli/azure/container?view=azure-cli-latest#az_container_show

View File

@@ -26,6 +26,8 @@ import (
// The service account secret mount path.
const serviceAccountSecretMountPath = "/var/run/secrets/kubernetes.io/serviceaccount"

+const virtualKubeletDNSNameLabel = "virtualkubelet.io/dnsnamelabel"

// ACIProvider implements the virtual-kubelet provider interface and communicates with Azure's ACI APIs.
type ACIProvider struct {
    aciClient *aci.Client
@@ -231,6 +233,10 @@ func (p *ACIProvider) CreatePod(pod *v1.Pod) error {
            Ports: ports,
            Type:  "Public",
        }
+
+        if dnsNameLabel := pod.Annotations[virtualKubeletDNSNameLabel]; dnsNameLabel != "" {
+            containerGroup.ContainerGroupProperties.IPAddress.DNSNameLabel = dnsNameLabel
+        }
    }

    podUID := string(pod.UID)

View File

@@ -10,8 +10,8 @@ import (
const (
    // BaseURI is the default URI used for compute services.
    BaseURI = "https://management.azure.com"
-    userAgent  = "virtual-kubelet/azure-arm-aci/2017-12-01"
-    apiVersion = "2017-12-01-preview"
+    userAgent  = "virtual-kubelet/azure-arm-aci/2018-02-01"
+    apiVersion = "2018-02-01-preview"
    containerGroupURLPath     = "subscriptions/{{.subscriptionId}}/resourceGroups/{{.resourceGroup}}/providers/Microsoft.ContainerInstance/containerGroups/{{.containerGroupName}}"
    containerGroupListURLPath = "subscriptions/{{.subscriptionId}}/providers/Microsoft.ContainerInstance/containerGroups"

View File

@@ -172,9 +172,10 @@ type ImageRegistryCredential struct {
// IPAddress is IP address for the container group.
type IPAddress struct {
-    Ports []Port `json:"ports,omitempty"`
-    Type  string `json:"type,omitempty"`
-    IP    string `json:"ip,omitempty"`
+    Ports        []Port `json:"ports,omitempty"`
+    Type         string `json:"type,omitempty"`
+    IP           string `json:"ip,omitempty"`
+    DNSNameLabel string `json:"dnsNameLabel,omitempty"`
}
// Logs is the logs.
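To see what the new field puts on the wire, here is a small, self-contained sketch of marshaling the struct above (the `Port` fields here are assumptions for the example, not copied from the repo); empty fields drop out thanks to `omitempty`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Port is a stand-in for aci.Port; its fields are assumptions for this sketch.
type Port struct {
	Protocol string `json:"protocol,omitempty"`
	Port     int32  `json:"port"`
}

// IPAddress mirrors the struct from the hunk above.
type IPAddress struct {
	Ports        []Port `json:"ports,omitempty"`
	Type         string `json:"type,omitempty"`
	IP           string `json:"ip,omitempty"`
	DNSNameLabel string `json:"dnsNameLabel,omitempty"`
}

func main() {
	ip := IPAddress{
		Ports:        []Port{{Protocol: "TCP", Port: 80}},
		Type:         "Public",
		DNSNameLabel: "helloworld-aci",
	}
	b, _ := json.MarshalIndent(ip, "", "  ")
	// The unset IP field is omitted; dnsNameLabel is what ACI uses to
	// build the <label>.<region>.azurecontainer.io FQDN.
	fmt.Println(string(b))
}
```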

View File

@@ -77,9 +77,9 @@ func (p *MockProvider) DeletePod(pod *v1.Pod) (err error) {
// GetPod returns a pod by name that is stored in memory.
func (p *MockProvider) GetPod(namespace, name string) (pod *v1.Pod, err error) {
-    log.Printf("receive GetPod %q\n", pod.Name)
+    log.Printf("receive GetPod %q\n", name)
-    key, err := buildKey(pod)
+    key, err := buildKeyFromNames(namespace, name)
    if err != nil {
        return nil, err
    }
@@ -246,6 +246,10 @@ func (p *MockProvider) OperatingSystem() string {
    return providers.OperatingSystemLinux
}
+
+func buildKeyFromNames(namespace string, name string) (string, error) {
+    return fmt.Sprintf("%s-%s", namespace, name), nil
+}
// buildKey is a helper for building the "key" for the providers pod store.
func buildKey(pod *v1.Pod) (string, error) {
    if pod.ObjectMeta.Namespace == "" {
@@ -256,5 +260,5 @@ func buildKey(pod *v1.Pod) (string, error) {
        return "", fmt.Errorf("pod name not found")
    }
-    return fmt.Sprintf("%s-%s", pod.ObjectMeta.Namespace, pod.ObjectMeta.Name), nil
+    return buildKeyFromNames(pod.ObjectMeta.Namespace, pod.ObjectMeta.Name)
}
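As a quick sanity check, the new helper can be exercised in isolation; a minimal test sketch (the helper body is copied from the hunk above, the test itself is ours):

```go
package mock

import (
	"fmt"
	"testing"
)

func buildKeyFromNames(namespace string, name string) (string, error) {
	return fmt.Sprintf("%s-%s", namespace, name), nil
}

// TestBuildKeyFromNames verifies the namespace-name key convention
// used by the mock provider's in-memory pod store.
func TestBuildKeyFromNames(t *testing.T) {
	key, err := buildKeyFromNames("default", "helloworld")
	if err != nil {
		t.Fatal(err)
	}
	if key != "default-helloworld" {
		t.Fatalf("unexpected key: %q", key)
	}
}
```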

providers/web/README.md (new file, 157 lines)
View File

@@ -0,0 +1,157 @@
Web provider for Virtual Kubelet
================================
Virtual Kubelet providers are written in the Go programming language. While Go
is a great general-purpose language, plenty of applications are written in
other languages, and the problem Virtual Kubelet solves applies to them just as
much. This provider aims to bridge technology stacks and programming languages
by adapting the Virtual Kubelet provider interface to a web endpoint: it is a
thin layer that forwards all calls that Kubernetes makes to the virtual kubelet
on to a pre-configured HTTP endpoint. This frees the provider's implementor to
write their code in any programming language and technology stack they see fit.
The `providers/web/web-rust` folder contains a sample provider implemented in
the Rust programming language. Here's a diagram that depicts the interaction
between Kubernetes, the virtual kubelet web provider, and the Rust app:

```
+----------------+         +---------------------------+          +------------------------------+
|                |         |                           |   HTTP   |                              |
|   Kubernetes   | <-----> |   Virtual Kubelet: Web    | <------> |   Provider written in Rust   |
|                |         |                           |          |                              |
+----------------+         +---------------------------+          +------------------------------+
```
Provider interface
------------------
The web provider reads the endpoint to which it forwards requests from an
environment variable named `WEB_ENDPOINT_URL`. The service behind that endpoint
must implement the following HTTP API:
| Path | Verb | Query | Request | Response | Description |
|-------------------|--------|-----------------------------------------|----------|---------------------------------------------------|---------------------------------------------------------------------------|
| /createPod | POST | - | Pod JSON | HTTP status code | Create a new pod |
| /updatePod | PUT | - | Pod JSON | HTTP status code | Update pod spec |
| /deletePod | DELETE | - | Pod JSON | HTTP status code | Delete an existing pod |
| /getPod | GET | namespace, name | - | Pod JSON | Given a pod namespace and name, return the pod JSON |
| /getContainerLogs | GET | namespace, podName, containerName, tail | - | Container logs | Given the namespace, pod name and container name, return `tail` log lines |
| /getPodStatus | GET | namespace, name | - | Pod status JSON | Given a pod namespace and name, return the pod's status JSON |
| /getPods | GET | - | - | Array of pod JSON strings | Fetch list of created pods |
| /capacity | GET | - | - | JSON map containing resource name and values | Fetch resource capacity values |
| /nodeConditions | GET | - | - | Array of node condition JSON strings | Get list of node conditions (Ready, OutOfDisk etc) |
| /nodeAddresses | GET | - | - | Array of node address values (type/address pairs) | Fetch a list of addresses for the node status |
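To make the contract concrete, here is a minimal sketch of an endpoint implementing two of these routes in Go (the sample in this repo is Rust; this code is illustrative, not the shipped implementation):

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"sync"
)

var (
	mu   sync.Mutex
	pods = []json.RawMessage{} // raw pod JSON documents, in creation order
)

func main() {
	// POST /createPod receives the pod JSON and answers with a status code.
	http.HandleFunc("/createPod", func(w http.ResponseWriter, r *http.Request) {
		var pod json.RawMessage
		if err := json.NewDecoder(r.Body).Decode(&pod); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		mu.Lock()
		pods = append(pods, pod)
		mu.Unlock()
		w.WriteHeader(http.StatusOK)
	})

	// GET /getPods returns the array of pods created so far.
	http.HandleFunc("/getPods", func(w http.ResponseWriter, r *http.Request) {
		mu.Lock()
		defer mu.Unlock()
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(pods)
	})

	log.Fatal(http.ListenAndServe(":3000", nil))
}
```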
A typical deployment for this setup is to run the provider implementation as a
container in the same pod as the virtual kubelet itself (as a "sidecar"), as
sketched below.
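Here is a hedged sketch of that sidecar wiring (image references are illustrative; the Helm chart described below is the supported path). `WEB_ENDPOINT_URL` can point at `localhost` because containers in a pod share a network namespace:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: virtual-kubelet-web
spec:
  containers:
  - name: virtualkubelet
    image: microsoft/virtual-kubelet   # illustrative image reference
    env:
    - name: WEB_ENDPOINT_URL
      value: "http://localhost:3000"
  - name: webrust
    image: avranju/web-rust:latest
    ports:
    - containerPort: 3000
```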
Take her for a spin
-------------------
A sample web provider implementation is included in this repo in order to
showcase what this enables. The sample has been implemented in
[Rust](http://rust-lang.org). The easiest way to get this up and running is
to use the Helm chart available at `providers/web/charts/web-rust`. Open a
terminal and install the chart like so:
```console
$ cd providers/web/charts
$ helm install -n web-provider ./web-rust
$ kubectl get pods
NAME                                                READY     STATUS    RESTARTS   AGE
web-provider-virtual-kubelet-web-6b5b7446f6-279xl   2/2       Running   0          3h
```
If you list the nodes in the cluster after this you should see something that
looks like this:
```console
$ kubectl get nodes
NAME                       STATUS    ROLES     AGE       VERSION
aks-nodepool1-35187879-0   Ready     agent     37d       v1.8.2
aks-nodepool1-35187879-1   Ready     agent     37d       v1.8.2
aks-nodepool1-35187879-3   Ready     agent     37d       v1.8.2
virtual-kubelet-web        Ready     agent     3h        v1.8.3
```
In case the name of the node didn't give it away, the last entry in the output
above is the virtual kubelet. If you list the containers in the pod that
represents the virtual kubelet, you should see the sidecar Rust container:

```console
$ kubectl get pods -o=custom-columns=NAME:.metadata.name,CONTAINERS:.spec.containers[*].name
NAME                                                CONTAINERS
web-provider-virtual-kubelet-web-6b5b7446f6-279xl   webrust,virtualkubelet
```
In the output above, `webrust` is the sidecar container and `virtualkubelet` is
the broker that forwards requests to `webrust`. You can run a query on the
`/getPods` HTTP endpoint on the `webrust` container to see a list of the pods
that it has been asked to create. To do this, first use `kubectl` to set up a
port-forwarding server like so:

```console
$ kubectl port-forward web-provider-virtual-kubelet-web-6b5b7446f6-279xl 3000:3000
```
Now, if you run `curl` against the `http://localhost:3000/getPods` URL, you
should see the pod JSON dumped to the terminal. I ran my test on a Kubernetes
cluster deployed on [Azure](https://docs.microsoft.com/en-us/azure/aks/), which
happens to deploy a daemonset with what I take to be "system" pods on every
node in the cluster. You can filter the output to see just the pod names using
the [jq](https://stedolan.github.io/jq/) tool like so:
```console
$ curl -s http://localhost:3000/getPods | jq '.[] | { name: .metadata.name }'
{
  "name": "kube-proxy-czz57"
}
{
  "name": "kube-svc-redirect-7qlpd"
}
```
You can deploy workloads to the virtual kubelet as you normally do. Here's a
sample pod spec that uses `nodeSelector` to cause the deployment to be scheduled
on the virtual kubelet.
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: vk-pod
  labels:
    foo: bar
spec:
  containers:
  - name: web1
    image: nginx
  nodeSelector:
    type: virtual-kubelet
```
Let's go ahead and deploy the pod and run our `/getPods` query again:
```console
$ kubectl apply -f ~/tmp/pod1.yaml
pod "vk-pod" created

$ curl -s http://localhost:3000/getPods | jq '.[] | { name: .metadata.name }'
{
  "name": "kube-proxy-czz57"
}
{
  "name": "kube-svc-redirect-7qlpd"
}
{
  "name": "vk-pod"
}

$ kubectl get pods
NAME                                                READY     STATUS    RESTARTS   AGE
vk-pod                                              0/1       Running   0          1m
web-provider-virtual-kubelet-web-6b5b7446f6-279xl   2/2       Running   6          4h
```
As you can tell, a new pod has been scheduled to run on our virtual kubelet
instance. Deleting pods works as one would expect:
```console
$ kubectl delete -f ~/tmp/pod1.yaml
pod "vk-pod" deleted

$ curl -s http://localhost:3000/getPods | jq '.[] | { name: .metadata.name }'
{
  "name": "kube-proxy-czz57"
}
{
  "name": "kube-svc-redirect-7qlpd"
}
```

View File

@@ -19,7 +19,7 @@ spec:
        release: {{ .Release.Name }}
    spec:
      containers:
-      - name: rustwebprovider
+      - name: webrust
        image: "{{ .Values.rustwebimage.repository }}:{{ .Values.rustwebimage.tag }}"
        imagePullPolicy: {{ .Values.rustwebimage.pullPolicy }}
        ports:

View File

@@ -1,5 +1,5 @@
rustwebimage:
-  repository: avranju/rust-web-provider
+  repository: avranju/web-rust
  tag: latest
  pullPolicy: Always
  port: 3000

View File

@@ -1,9 +0,0 @@
FROM debian:stretch-slim
WORKDIR /app
ADD ./rust-web-provider /app/rust-web-provider
ENV RUST_LOG=info
EXPOSE 3000
ENTRYPOINT [ "/app/rust-web-provider" ]

View File

@@ -558,17 +558,6 @@ dependencies = [
"url 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "rust-web-provider"
version = "0.1.0"
dependencies = [
"env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"kube_rust 1.0.0 (git+https://github.com/avranju/kube-rust?rev=058de6366d0d75cb60b2d0fd5ba1abd2e7d83fff)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
"virtual-kubelet-adapter 0.1.0 (git+https://github.com/avranju/rust-virtual-kubelet-adapter?rev=4250103d31e2864725e47bdd23295e79ee12b6d0)",
]
[[package]]
name = "rustc-serialize"
version = "0.3.24"
@@ -852,6 +841,17 @@ name = "void"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "web-rust"
version = "0.1.0"
dependencies = [
"env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"kube_rust 1.0.0 (git+https://github.com/avranju/kube-rust?rev=058de6366d0d75cb60b2d0fd5ba1abd2e7d83fff)",
"log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
"virtual-kubelet-adapter 0.1.0 (git+https://github.com/avranju/rust-virtual-kubelet-adapter?rev=4250103d31e2864725e47bdd23295e79ee12b6d0)",
]
[[package]]
name = "winapi"
version = "0.2.8"

View File

@@ -1,5 +1,5 @@
[package]
name = "rust-web-provider"
name = "web-rust"
version = "0.1.0"
authors = ["Rajasekharan Vengalil <rajave@microsoft.com>"]

View File

@@ -0,0 +1,9 @@
FROM debian:stretch-slim
WORKDIR /app
ADD ./web-rust /app/web-rust
ENV RUST_LOG=info
EXPOSE 3000
ENTRYPOINT [ "/app/web-rust" ]

View File

@@ -6,8 +6,8 @@ SCRIPT_NAME=$(basename "$0")
DIR=$(cd "$(dirname "$0")" && pwd)
ROOT_FOLDER="$DIR/.."
PUBLISH_DIR=$ROOT_FOLDER/target/publish
-TARGET_NAME=rust-web-provider
-IMAGE_NAME=rust-web-provider
+TARGET_NAME=web-rust
+IMAGE_NAME=web-rust
IMAGE_VERSION=latest
BUILD_RELEASE=true
SOURCE_RELEASE_DIR=$ROOT_FOLDER/target/release
@@ -20,7 +20,7 @@ usage()
echo "Note: You might have to run this as root or sudo."
echo ""
echo "options"
echo " -i, --image-name Image name (default: rust-web-provider)"
echo " -i, --image-name Image name (default: web-rust)"
echo " -v, --image-version Docker Image Version (default: latest)"
echo " -r, --build-release Build release configuration - true|false (default: true)"
exit 1;

View File

@@ -74,7 +74,7 @@ impl Provider for UnitProvider {
info!("Getting pod: {}", name);
self.pods_map
.get(name)
.filter(|pod| {
.xfilter(|pod| {
let empty = String::from("");
let ns = pod.metadata()
.map(|m| m.namespace())

View File

@@ -1,9 +1,9 @@
pub trait Filter<T> {
-    fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self;
+    fn xfilter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self;
}

impl<T> Filter<T> for Option<T> {
-    fn filter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
+    fn xfilter<P: FnOnce(&T) -> bool>(self, predicate: P) -> Self {
        match self {
            Some(x) => {
                if predicate(&x) {