From a3bd10ab5f8728ddccb678133170e0565fb332da Mon Sep 17 00:00:00 2001 From: Alex Glikson Date: Mon, 23 Jul 2018 14:19:19 -0400 Subject: [PATCH 01/21] readme typo (#262) --- providers/azurebatch/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/azurebatch/README.md b/providers/azurebatch/README.md index 02e5cbc29..f8cc852fa 100644 --- a/providers/azurebatch/README.md +++ b/providers/azurebatch/README.md @@ -1,6 +1,6 @@ # Kubernetes Virtual Kubelet with Azure Batch -[Azure Batch](https://docs.microsoft.com/en-us/azure/batch/) provides a HPC Computing environment in Azure for distributed tasks. Azure Batch handles scheduling decrete jobs and tasks accross pools of VM's. It is commonly used for batch processing tasks such as rendering. +[Azure Batch](https://docs.microsoft.com/en-us/azure/batch/) provides a HPC Computing environment in Azure for distributed tasks. Azure Batch handles scheduling of discrete jobs and tasks accross pools of VM's. It is commonly used for batch processing tasks such as rendering. The Virtual kubelet integration allows you to take advantage of this from within Kubernetes. The primary usecase for the provider is to make it easy to use GPU based workload from normal Kubernetes clusters. For example, creating Kubernetes Jobs which train or execute ML models using Nvidia GPU's or using FFMPEG. @@ -76,4 +76,4 @@ The provider expects the following environment variables to be configured: The provider will assign pods to machines in the Azure Batch Pool. Each machine can, by default, process only one pod at a time running more than 1 pod per machine isn't currently supported and will result in errors. -Azure Batch queues tasks when no machines are available so pods will sit in `podPending` state while waiting for a VM to become available. \ No newline at end of file +Azure Batch queues tasks when no machines are available so pods will sit in `podPending` state while waiting for a VM to become available. 
From 047e5f22dbe7d1d81c5fc6bfdaa171dc85fb3782 Mon Sep 17 00:00:00 2001 From: Jeremy Rickard Date: Mon, 23 Jul 2018 13:12:05 -0600 Subject: [PATCH 02/21] Updating aci api version (#264) The aci provider is using a quite old version of the aci API. This PR updates it to the most recent version. Fixes: #263 --- providers/azure/client/aci/client.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/azure/client/aci/client.go b/providers/azure/client/aci/client.go index 83fe4b247..309d11adb 100644 --- a/providers/azure/client/aci/client.go +++ b/providers/azure/client/aci/client.go @@ -10,8 +10,8 @@ import ( const ( // BaseURI is the default URI used for compute services. baseURI = "https://management.azure.com" - userAgent = "virtual-kubelet/azure-arm-aci/2018-02-01" - apiVersion = "2018-02-01-preview" + userAgent = "virtual-kubelet/azure-arm-aci/2018-06-01" + apiVersion = "2018-06-01" containerGroupURLPath = "subscriptions/{{.subscriptionId}}/resourceGroups/{{.resourceGroup}}/providers/Microsoft.ContainerInstance/containerGroups/{{.containerGroupName}}" containerGroupListURLPath = "subscriptions/{{.subscriptionId}}/providers/Microsoft.ContainerInstance/containerGroups" From 9b06d180239d714a4edc3f774301bb0a19ae3ca0 Mon Sep 17 00:00:00 2001 From: Jeremy Rickard Date: Tue, 24 Jul 2018 15:08:25 -0700 Subject: [PATCH 03/21] ACI Provider: Adding Liveness/Readiness probes to ACI sdk (#267) * Adding Liveness/Readiness probes to ACI sdk * Rename Secure to SecureValue * Slightly modify failure test. 
* Remove errant t.Fatal line --- providers/azure/client/aci/client_test.go | 143 ++++++++++++++++++++++ providers/azure/client/aci/types.go | 31 ++++- 2 files changed, 172 insertions(+), 2 deletions(-) diff --git a/providers/azure/client/aci/client_test.go b/providers/azure/client/aci/client_test.go index aa8c9d5e6..bae449f4e 100644 --- a/providers/azure/client/aci/client_test.go +++ b/providers/azure/client/aci/client_test.go @@ -217,6 +217,149 @@ func TestListContainerGroup(t *testing.T) { } } +func TestCreateContainerGroupWithLivenessProbe(t *testing.T) { + uid := uuid.New() + congainerGroupName := containerGroup + "-" + uid.String()[0:6] + cg, err := client.CreateContainerGroup(resourceGroup, congainerGroupName, ContainerGroup{ + Location: location, + ContainerGroupProperties: ContainerGroupProperties{ + OsType: Linux, + Containers: []Container{ + { + Name: "nginx", + ContainerProperties: ContainerProperties{ + Image: "nginx", + Command: []string{"nginx", "-g", "daemon off;"}, + Ports: []ContainerPort{ + { + Protocol: ContainerNetworkProtocolTCP, + Port: 80, + }, + }, + Resources: ResourceRequirements{ + Requests: &ResourceRequests{ + CPU: 1, + MemoryInGB: 1, + }, + Limits: &ResourceLimits{ + CPU: 1, + MemoryInGB: 1, + }, + }, + LivenessProbe: &ContainerProbe{ + HTTPGet: &ContainerHTTPGetProbe{ + Port: 80, + }, + }, + }, + }, + }, + }, + }) + if err != nil { + t.Fatal(err) + } + if cg.Name != congainerGroupName { + t.Fatalf("resource group name is %s, expected %s", cg.Name, congainerGroupName) + } +} + +func TestCreateContainerGroupFailsWithLivenessProbeMissingPort(t *testing.T) { + uid := uuid.New() + congainerGroupName := containerGroup + "-" + uid.String()[0:6] + _, err := client.CreateContainerGroup(resourceGroup, congainerGroupName, ContainerGroup{ + Location: location, + ContainerGroupProperties: ContainerGroupProperties{ + OsType: Linux, + Containers: []Container{ + { + Name: "nginx", + ContainerProperties: ContainerProperties{ + Image: "nginx", + 
Command: []string{"nginx", "-g", "daemon off;"}, + Ports: []ContainerPort{ + { + Protocol: ContainerNetworkProtocolTCP, + Port: 80, + }, + }, + Resources: ResourceRequirements{ + Requests: &ResourceRequests{ + CPU: 1, + MemoryInGB: 1, + }, + Limits: &ResourceLimits{ + CPU: 1, + MemoryInGB: 1, + }, + }, + LivenessProbe: &ContainerProbe{ + HTTPGet: &ContainerHTTPGetProbe{ + Path: "/", + }, + }, + }, + }, + }, + }, + }) + if err == nil { + t.Fatal("expected failure") + } +} + +func TestCreateContainerGroupWithReadinessProbe(t *testing.T) { + uid := uuid.New() + congainerGroupName := containerGroup + "-" + uid.String()[0:6] + cg, err := client.CreateContainerGroup(resourceGroup, congainerGroupName, ContainerGroup{ + Location: location, + ContainerGroupProperties: ContainerGroupProperties{ + OsType: Linux, + Containers: []Container{ + { + Name: "nginx", + ContainerProperties: ContainerProperties{ + Image: "nginx", + Command: []string{"nginx", "-g", "daemon off;"}, + Ports: []ContainerPort{ + { + Protocol: ContainerNetworkProtocolTCP, + Port: 80, + }, + }, + Resources: ResourceRequirements{ + Requests: &ResourceRequests{ + CPU: 1, + MemoryInGB: 1, + }, + Limits: &ResourceLimits{ + CPU: 1, + MemoryInGB: 1, + }, + }, + ReadinessProbe: &ContainerProbe{ + HTTPGet: &ContainerHTTPGetProbe{ + Port: 80, + Path: "/", + }, + InitialDelaySeconds: 5, + SuccessThreshold: 3, + FailureThreshold: 5, + TimeoutSeconds: 120, + }, + }, + }, + }, + }, + }) + if err != nil { + t.Fatal(err) + } + if cg.Name != congainerGroupName { + t.Fatalf("resource group name is %s, expected %s", cg.Name, congainerGroupName) + } +} + func TestDeleteContainerGroup(t *testing.T) { err := client.DeleteContainerGroup(resourceGroup, containerGroup) if err != nil { diff --git a/providers/azure/client/aci/types.go b/providers/azure/client/aci/types.go index 1e8b06383..126bce8b5 100644 --- a/providers/azure/client/aci/types.go +++ b/providers/azure/client/aci/types.go @@ -121,6 +121,8 @@ type ContainerProperties 
struct { InstanceView ContainerPropertiesInstanceView `json:"instanceView,omitempty"` Resources ResourceRequirements `json:"resources,omitempty"` VolumeMounts []VolumeMount `json:"volumeMounts,omitempty"` + LivenessProbe *ContainerProbe `json:"livenessProbe,omitempty"` + ReadinessProbe *ContainerProbe `json:"readinessProbe,omitempty"` } // ContainerPropertiesInstanceView is the instance view of the container instance. Only valid in response. @@ -142,8 +144,9 @@ type ContainerState struct { // EnvironmentVariable is the environment variable to set within the container instance. type EnvironmentVariable struct { - Name string `json:"name,omitempty"` - Value string `json:"value,omitempty"` + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + SecureValue string `json:"secureValue,omitempty"` } // Event is a container group or container instance event. @@ -293,3 +296,27 @@ type ExecResponse struct { WebSocketUri string `json:"webSocketUri,omitempty"` Password string `json:"password,omitempty"` } + +// ContainerProbe is a probe definition that can be used for Liveness +// or Readiness checks. 
+type ContainerProbe struct { + Exec *ContainerExecProbe `json:"exec,omitempty"` + HTTPGet *ContainerHTTPGetProbe `json:"httpGet,omitempty"` + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty"` + Period int32 `json:"periodSeconds,omitempty"` + FailureThreshold int32 `json:"failureThreshold,omitempty"` + SuccessThreshold int32 `json:"successThreshold,omitempty"` + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"` +} + +// ContainerExecProbe defines a command based probe +type ContainerExecProbe struct { + Command []string `json:"command,omitempty"` +} + +// ContainerHTTPGetProbe defines an HTTP probe +type ContainerHTTPGetProbe struct { + Port int `json:"port"` + Path string `json:"path,omitempty"` + Scheme string `json:"scheme,omitempty"` +} From e41a352d635b1490fd25f84a386128ec3af48323 Mon Sep 17 00:00:00 2001 From: Rohan Chakravarthy Date: Tue, 24 Jul 2018 17:35:04 -0700 Subject: [PATCH 04/21] add support for log analytics in the ACI SDK --- providers/azure/client/aci/client_test.go | 108 ++++++++++++++++++++++ providers/azure/client/aci/types.go | 12 +++ scripts/createCredentials.sh | 8 ++ 3 files changed, 128 insertions(+) diff --git a/providers/azure/client/aci/client_test.go b/providers/azure/client/aci/client_test.go index bae449f4e..fb918cde5 100644 --- a/providers/azure/client/aci/client_test.go +++ b/providers/azure/client/aci/client_test.go @@ -1,6 +1,9 @@ package aci import ( + "encoding/json" + "fmt" + "io/ioutil" "log" "os" "strings" @@ -360,6 +363,111 @@ func TestCreateContainerGroupWithReadinessProbe(t *testing.T) { } } +func logAnalyticsWorkspaceFromFile(filepath string) (*LogAnalyticsWorkspace, error) { + analyticsdata, err := ioutil.ReadFile(filepath) + if err != nil { + return nil, fmt.Errorf("Reading LogAnalyticsWorkspace file %q failed: %v", filepath, err) + } + // Unmarshal the log analytics file. 
+ var law LogAnalyticsWorkspace + if err := json.Unmarshal(analyticsdata, &law); err != nil { + return nil, err + } + return &law, nil +} + +func TestCreateContainerGroupWithLogAnalytics(t *testing.T) { + law, err := logAnalyticsWorkspaceFromFile("../../../../loganalytics.json") + if err != nil { + t.Fatal(err) + } + cgname := "cgla" + cg, err := client.CreateContainerGroup(resourceGroup, cgname, ContainerGroup{ + Location: location, + ContainerGroupProperties: ContainerGroupProperties{ + OsType: Linux, + Containers: []Container{ + { + Name: "nginx", + ContainerProperties: ContainerProperties{ + Image: "nginx", + Command: []string{"nginx", "-g", "daemon off;"}, + Ports: []ContainerPort{ + { + Protocol: ContainerNetworkProtocolTCP, + Port: 80, + }, + }, + Resources: ResourceRequirements{ + Requests: &ResourceRequests{ + CPU: 1, + MemoryInGB: 1, + }, + Limits: &ResourceLimits{ + CPU: 1, + MemoryInGB: 1, + }, + }, + }, + }, + }, + Diagnostics: &ContainerGroupDiagnostics{ + LogAnalytics: law, + }, + }, + }) + if err != nil { + t.Fatal(err) + } + if cg.Name != cgname { + t.Fatalf("resource group name is %s, expected %s", cg.Name, cgname) + } + if err := client.DeleteContainerGroup(resourceGroup, cgname); err != nil { + t.Fatalf("Delete Container Group failed: %s", err.Error()) + } +} + +func TestCreateContainerGroupWithInvalidLogAnalytics(t *testing.T) { + law := &LogAnalyticsWorkspace{} + _, err := client.CreateContainerGroup(resourceGroup, containerGroup, ContainerGroup{ + Location: location, + ContainerGroupProperties: ContainerGroupProperties{ + OsType: Linux, + Containers: []Container{ + { + Name: "nginx", + ContainerProperties: ContainerProperties{ + Image: "nginx", + Command: []string{"nginx", "-g", "daemon off;"}, + Ports: []ContainerPort{ + { + Protocol: ContainerNetworkProtocolTCP, + Port: 80, + }, + }, + Resources: ResourceRequirements{ + Requests: &ResourceRequests{ + CPU: 1, + MemoryInGB: 1, + }, + Limits: &ResourceLimits{ + CPU: 1, + MemoryInGB: 1, + }, + 
}, + }, + }, + }, + Diagnostics: &ContainerGroupDiagnostics{ + LogAnalytics: law, + }, + }, + }) + if err == nil { + t.Fatal("TestCreateContainerGroupWithInvalidLogAnalytics should fail but encountered no errors") + } +} + func TestDeleteContainerGroup(t *testing.T) { err := client.DeleteContainerGroup(resourceGroup, containerGroup) if err != nil { diff --git a/providers/azure/client/aci/types.go b/providers/azure/client/aci/types.go index 126bce8b5..1407c57e7 100644 --- a/providers/azure/client/aci/types.go +++ b/providers/azure/client/aci/types.go @@ -91,6 +91,7 @@ type ContainerGroupProperties struct { OsType OperatingSystemTypes `json:"osType,omitempty"` Volumes []Volume `json:"volumes,omitempty"` InstanceView ContainerGroupPropertiesInstanceView `json:"instanceView,omitempty"` + Diagnostics *ContainerGroupDiagnostics `json:"diagnostics,omitempty"` } // ContainerGroupPropertiesInstanceView is the instance view of the container group. Only valid in response. @@ -320,3 +321,14 @@ type ContainerHTTPGetProbe struct { Path string `json:"path,omitempty"` Scheme string `json:"scheme,omitempty"` } + +// ContainerGroupDiagnostics contains an instance of LogAnalyticsWorkspace +type ContainerGroupDiagnostics struct { + LogAnalytics *LogAnalyticsWorkspace `json:"loganalytics,omitempty"` +} + +// LogAnalyticsWorkspace defines details for a Log Analytics workspace +type LogAnalyticsWorkspace struct { + WorkspaceID string `json:"workspaceID,omitempty"` + WorkspaceKey string `json:"workspaceKey,omitempty"` +} diff --git a/scripts/createCredentials.sh b/scripts/createCredentials.sh index 4e412ee50..54ce8c591 100644 --- a/scripts/createCredentials.sh +++ b/scripts/createCredentials.sh @@ -15,6 +15,14 @@ cat < ${outputPathCredsfile} } EOF +# This will build the log analytics credentials during CI +cat < ${outputPathLogAnalyticsFile} +{ + "workspaceID": "$omsworkspaceID", + "workspaceKey": "$omsworkspaceKey" +} +EOF + # This will build the kubeConfig during the CI cat < 
${outputPathKubeConfigFile} --- From 31a415c83a03d6c85287045c7d2b2654d11d8521 Mon Sep 17 00:00:00 2001 From: Daniel Mueller Date: Wed, 25 Jul 2018 11:54:22 -0700 Subject: [PATCH 05/21] Fix bug in exec command retrieval (#265) The exec command as extracted from the query comprises only the first part of the command and does not include potentially supplied parameters. E.g., $ kubectl exec pod -- ls -t /usr > command: ls This change fixes the problem by moving away from the Query.Get API. $ kubectl exec pod -- ls -t /usr > command: [ls -t /usr] --- vkubelet/apiserver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vkubelet/apiserver.go b/vkubelet/apiserver.go index 52d433011..81cca6b0c 100644 --- a/vkubelet/apiserver.go +++ b/vkubelet/apiserver.go @@ -79,7 +79,7 @@ func ApiServerHandlerExec(w http.ResponseWriter, req *http.Request) { supportedStreamProtocols := strings.Split(req.Header.Get("X-Stream-Protocol-Version"), ",") q := req.URL.Query() - command := q.Get("command") + command := q["command"] // streamOpts := &remotecommand.Options{ // Stdin: (q.Get("input") == "1"), @@ -99,5 +99,5 @@ func ApiServerHandlerExec(w http.ResponseWriter, req *http.Request) { idleTimeout := time.Second * 30 streamCreationTimeout := time.Second * 30 - remotecommand.ServeExec(w, req, p, fmt.Sprintf("%s-%s", namespace, pod), "", container, []string{command}, streamOpts, idleTimeout, streamCreationTimeout, supportedStreamProtocols) + remotecommand.ServeExec(w, req, p, fmt.Sprintf("%s-%s", namespace, pod), "", container, command, streamOpts, idleTimeout, streamCreationTimeout, supportedStreamProtocols) } From 28daffa96f2b90b8b36cf8c87c777301a1762a8a Mon Sep 17 00:00:00 2001 From: Jake Bjorke Date: Wed, 25 Jul 2018 14:04:10 -0500 Subject: [PATCH 06/21] fix type-o in azure provider documentation (#246) * fix type-o * corrected type-o where "the" should have been "then" --- providers/azure/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/providers/azure/README.md b/providers/azure/README.md index b12b6fab0..55ec036aa 100644 --- a/providers/azure/README.md +++ b/providers/azure/README.md @@ -120,7 +120,7 @@ that you've created an [AKS cluster](https://docs.microsoft.com/en-us/azure/aks/ To install the ACI Connector use the az cli and the aks namespace. Make sure to use the resource group of the aks cluster you've created and the name of the aks cluster you've created. You can choose the connector name to be anything. Choose any command below to install the Linux, Windows, or both the Windows and Linux Connector. -Note: You need to specify the --aci-resource-group, due to a bug in the az cli. The resource groupis the auto-generated. To find the name navigate to the Azure Portal resource groups, scroll down and find the name that matches MC_aks cluster name_aks rg_location. +Note: You need to specify the --aci-resource-group, due to a bug in the az cli. The resource group is then auto-generated. To find the name navigate to the Azure Portal resource groups, scroll down and find the name that matches MC_aks cluster name_aks rg_location. 1. 
Install the Linux ACI Connector From ef6ae9ecf4163441fbe3bcf1bcc9e04d0402389f Mon Sep 17 00:00:00 2001 From: Rohan Chakravarthy Date: Wed, 25 Jul 2018 12:37:26 -0700 Subject: [PATCH 07/21] Plumb through log analytics values (#274) * plumb through log analytics values * add option to specify a log analytics file as well * use secret for log analytics --- .gitignore | 3 ++ .../virtual-kubelet/templates/deployment.yaml | 4 ++ charts/virtual-kubelet/templates/secrets.yaml | 3 ++ charts/virtual-kubelet/values.yaml | 6 ++- providers/azure/aci.go | 21 ++++++++++ providers/azure/client/README.md | 26 +++++++++++++ providers/azure/client/aci/analytics.go | 39 +++++++++++++++++++ providers/azure/client/aci/analytics_test.go | 38 ++++++++++++++++++ providers/azure/client/aci/client_test.go | 22 +---------- 9 files changed, 141 insertions(+), 21 deletions(-) create mode 100644 providers/azure/client/aci/analytics.go create mode 100644 providers/azure/client/aci/analytics_test.go diff --git a/.gitignore b/.gitignore index c69176432..7ef1ffcf7 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,9 @@ bin/ # Test credentials file credentials.json +# Test loganalytics file +loganalytics.json + # VS Code files .vscode/ diff --git a/charts/virtual-kubelet/templates/deployment.yaml b/charts/virtual-kubelet/templates/deployment.yaml index 41ba5b766..046b86a2f 100644 --- a/charts/virtual-kubelet/templates/deployment.yaml +++ b/charts/virtual-kubelet/templates/deployment.yaml @@ -26,6 +26,10 @@ spec: value: /etc/virtual-kubelet/cert.pem - name: APISERVER_KEY_LOCATION value: /etc/virtual-kubelet/key.pem + {{ if .Values.loganalytics.enabled }} + - name: LOG_ANALYTICS_AUTH_LOCATION + value: /etc/virtual-kubelet/loganalytics.json + {{ end }} - name: VKUBELET_POD_IP valueFrom: fieldRef: diff --git a/charts/virtual-kubelet/templates/secrets.yaml b/charts/virtual-kubelet/templates/secrets.yaml index c3eb8c84c..bb25d6d74 100644 --- a/charts/virtual-kubelet/templates/secrets.yaml +++ 
b/charts/virtual-kubelet/templates/secrets.yaml @@ -7,3 +7,6 @@ data: credentials.json: {{ printf "{ \"clientId\": \"%s\", \"clientSecret\": \"%s\", \"subscriptionId\": \"%s\", \"tenantId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com/\", \"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"database.windows.net\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\" }" (default "MISSING" .Values.env.azureClientId) (default "MISSING" .Values.env.azureClientKey) (default "MISSING" .Values.env.azureSubscriptionId) (default "MISSING" .Values.env.azureTenantId) | b64enc | quote }} cert.pem: {{ (default "TUlTU0lORw==" .Values.env.apiserverCert) | quote }} key.pem: {{ (default "TUlTU0lORw==" .Values.env.apiserverKey) | quote }} + {{ if .Values.loganalytics.enabled }} + loganalytics.json: {{ printf "{\"workspaceID\": \"%s\",\"workspaceKey\": \"%s\"}" (required "workspaceID is required for loganalytics" .Values.loganalytics.workspaceID ) (required "workspaceKey is required for loganalytics" .Values.loganalytics.workspaceKey ) }} + {{ end }} diff --git a/charts/virtual-kubelet/values.yaml b/charts/virtual-kubelet/values.yaml index b0426297a..b1e04a6da 100644 --- a/charts/virtual-kubelet/values.yaml +++ b/charts/virtual-kubelet/values.yaml @@ -14,7 +14,11 @@ env: nodeOsType: apiserverCert: apiserverKey: - monitoredNamespace: + monitoredNamespace: +loganalytics: + enabled: false + workspaceID: + workspaceKey: # Install Default RBAC roles and bindings rbac: diff --git a/providers/azure/aci.go b/providers/azure/aci.go index e085045e0..58b533979 100644 --- a/providers/azure/aci.go +++ b/providers/azure/aci.go @@ -44,6 +44,7 @@ type ACIProvider struct { pods string internalIP string daemonEndpointPort int32 + diagnostics *aci.ContainerGroupDiagnostics } // AuthConfig 
is the secret returned from an ImageRegistryCredential @@ -155,6 +156,25 @@ func NewACIProvider(config string, rm *manager.ResourceManager, nodeName, operat return nil, err } + // If the log analytics file has been specified, load workspace credentials from the file + if logAnalyticsAuthFile := os.Getenv("LOG_ANALYTICS_AUTH_LOCATION"); logAnalyticsAuthFile != "" { + p.diagnostics, err = aci.NewContainerGroupDiagnosticsFromFile(logAnalyticsAuthFile) + if err != nil { + return nil, err + } + } + + // If we have both the log analytics workspace id and key, add them to the provider + // Environment variables overwrite the values provided in the file + if logAnalyticsID := os.Getenv("LOG_ANALYTICS_ID"); logAnalyticsID != "" { + if logAnalyticsKey := os.Getenv("LOG_ANALYTICS_KEY"); logAnalyticsKey != "" { + p.diagnostics, err = aci.NewContainerGroupDiagnostics(logAnalyticsID, logAnalyticsKey) + if err != nil { + return nil, err + } + } + } + if rg := os.Getenv("ACI_RESOURCE_GROUP"); rg != "" { p.resourceGroup = rg } @@ -227,6 +247,7 @@ func (p *ACIProvider) CreatePod(pod *v1.Pod) error { containerGroup.ContainerGroupProperties.Containers = containers containerGroup.ContainerGroupProperties.Volumes = volumes containerGroup.ContainerGroupProperties.ImageRegistryCredentials = creds + containerGroup.ContainerGroupProperties.Diagnostics = p.diagnostics filterServiceAccountSecretVolume(p.operatingSystem, &containerGroup) diff --git a/providers/azure/client/README.md b/providers/azure/client/README.md index 23943b52e..e07b7f2bd 100644 --- a/providers/azure/client/README.md +++ b/providers/azure/client/README.md @@ -38,3 +38,29 @@ The file looks like this, in case you want to create it yourself: "managementEndpointUrl": "https://management.core.windows.net/" } ``` + + +## Log Analytics support + +Log Analytics is supported through environment variables: +- `LOG_ANALYTICS_KEY` +- `LOG_ANALYTICS_ID` + +You can also specify a file with these values and specify the path to it in the 
`LOG_ANALYTICS_AUTH_LOCATION`: + +``` bash +export LOG_ANALYTICS_AUTH_LOCATION=/secure/location/loganalytics.json +``` + +``` powershell +$env:LOG_ANALYTICS_AUTH_LOCATION= "/secure/location/loganalytics.json" +``` + +The file should look like this: + +``` json +{ + "workspaceID": "", + "workspaceKey": "" +} +``` \ No newline at end of file diff --git a/providers/azure/client/aci/analytics.go b/providers/azure/client/aci/analytics.go new file mode 100644 index 000000000..0b1f00f3f --- /dev/null +++ b/providers/azure/client/aci/analytics.go @@ -0,0 +1,39 @@ +package aci + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" +) + +func NewContainerGroupDiagnostics(logAnalyticsID, logAnalyticsKey string) (*ContainerGroupDiagnostics, error) { + + if logAnalyticsID == "" || logAnalyticsKey == "" { + return nil, errors.New("Log Analytics configuration requires both the workspace ID and Key") + } + + return &ContainerGroupDiagnostics{ + LogAnalytics: &LogAnalyticsWorkspace{ + WorkspaceID: logAnalyticsID, + WorkspaceKey: logAnalyticsKey, + }, + }, nil +} + +func NewContainerGroupDiagnosticsFromFile(filepath string) (*ContainerGroupDiagnostics, error) { + + analyticsdata, err := ioutil.ReadFile(filepath) + if err != nil { + return nil, fmt.Errorf("Reading Log Analytics Auth file %q failed: %v", filepath, err) + } + // Unmarshal the log analytics file. 
+ var law LogAnalyticsWorkspace + if err := json.Unmarshal(analyticsdata, &law); err != nil { + return nil, err + } + + return &ContainerGroupDiagnostics{ + LogAnalytics: &law, + }, nil +} diff --git a/providers/azure/client/aci/analytics_test.go b/providers/azure/client/aci/analytics_test.go new file mode 100644 index 000000000..fef69988d --- /dev/null +++ b/providers/azure/client/aci/analytics_test.go @@ -0,0 +1,38 @@ +package aci + +import ( + "io/ioutil" + "os" + "testing" +) + +func TestLogAnalyticsFileParsingSuccess(t *testing.T) { + diagnostics, err := NewContainerGroupDiagnosticsFromFile("../../../../loganalytics.json") + if err != nil { + t.Fatal(err) + } + + if diagnostics == nil || diagnostics.LogAnalytics == nil { + t.Fatalf("Unexpected nil diagnostics. Log Analytics file not parsed correctly") + } + + if diagnostics.LogAnalytics.WorkspaceID == "" || diagnostics.LogAnalytics.WorkspaceKey == "" { + t.Fatalf("Unexpected empty analytics authentication credentials. Log Analytics file not parsed correctly") + } +} + +func TestLogAnalyticsFileParsingFailure(t *testing.T) { + tempFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + _, err = NewContainerGroupDiagnosticsFromFile(tempFile.Name()) + + // Cleaup + tempFile.Close() + os.Remove(tempFile.Name()) + + if err == nil { + t.Fatalf("Expected parsing an empty Log Analytics auth file to fail, but there were no errors") + } +} diff --git a/providers/azure/client/aci/client_test.go b/providers/azure/client/aci/client_test.go index fb918cde5..0fcaf5227 100644 --- a/providers/azure/client/aci/client_test.go +++ b/providers/azure/client/aci/client_test.go @@ -1,9 +1,6 @@ package aci import ( - "encoding/json" - "fmt" - "io/ioutil" "log" "os" "strings" @@ -363,21 +360,8 @@ func TestCreateContainerGroupWithReadinessProbe(t *testing.T) { } } -func logAnalyticsWorkspaceFromFile(filepath string) (*LogAnalyticsWorkspace, error) { - analyticsdata, err := ioutil.ReadFile(filepath) - if err != nil { 
- return nil, fmt.Errorf("Reading LogAnalyticsWorkspace file %q failed: %v", filepath, err) - } - // Unmarshal the log analytics file. - var law LogAnalyticsWorkspace - if err := json.Unmarshal(analyticsdata, &law); err != nil { - return nil, err - } - return &law, nil -} - func TestCreateContainerGroupWithLogAnalytics(t *testing.T) { - law, err := logAnalyticsWorkspaceFromFile("../../../../loganalytics.json") + diagnostics, err := NewContainerGroupDiagnosticsFromFile("../../../../loganalytics.json") if err != nil { t.Fatal(err) } @@ -411,9 +395,7 @@ func TestCreateContainerGroupWithLogAnalytics(t *testing.T) { }, }, }, - Diagnostics: &ContainerGroupDiagnostics{ - LogAnalytics: law, - }, + Diagnostics: diagnostics, }, }) if err != nil { From 13fbd5c38e699df085029a3da76d3c5468f80839 Mon Sep 17 00:00:00 2001 From: Rohan Chakravarthy Date: Mon, 30 Jul 2018 11:44:41 -0700 Subject: [PATCH 08/21] use secure value in ACI for secrets (#276) * use secure value in ACI for secrets * add tests for env variable conversion --- providers/azure/aci.go | 23 +++++++++++++--- providers/azure/aci_test.go | 52 +++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+), 4 deletions(-) diff --git a/providers/azure/aci.go b/providers/azure/aci.go index 58b533979..fc13c318e 100644 --- a/providers/azure/aci.go +++ b/providers/azure/aci.go @@ -676,10 +676,8 @@ func (p *ACIProvider) getContainers(pod *v1.Pod) ([]aci.Container, error) { c.EnvironmentVariables = make([]aci.EnvironmentVariable, 0, len(container.Env)) for _, e := range container.Env { - c.EnvironmentVariables = append(c.EnvironmentVariables, aci.EnvironmentVariable{ - Name: e.Name, - Value: e.Value, - }) + envVar := getACIEnvVar(e) + c.EnvironmentVariables = append(c.EnvironmentVariables, envVar) } // NOTE(robbiezhang): ACI CPU request must be times of 10m @@ -1058,3 +1056,20 @@ func filterServiceAccountSecretVolume(osType string, containerGroup *aci.Contain containerGroup.ContainerGroupProperties.Volumes = volumes } 
} + +func getACIEnvVar(e v1.EnvVar) aci.EnvironmentVariable { + var envVar aci.EnvironmentVariable + // If the variable is a secret, use SecureValue + if e.ValueFrom.SecretKeyRef != nil { + envVar = aci.EnvironmentVariable{ + Name: e.Name, + SecureValue: e.Value, + } + } else { + envVar = aci.EnvironmentVariable{ + Name: e.Name, + Value: e.Value, + } + } + return envVar +} diff --git a/providers/azure/aci_test.go b/providers/azure/aci_test.go index 81d7e3722..a71e5101e 100644 --- a/providers/azure/aci_test.go +++ b/providers/azure/aci_test.go @@ -363,6 +363,58 @@ func TestGetPodWithoutResourceRequestsLimits(t *testing.T) { "Containers[0].Resources.Requests.Memory doesn't match") } +func TestPodToACISecretEnvVar(t *testing.T) { + + testKey := "testVar" + testVal := "testVal" + + e := v1.EnvVar{ + Name: testKey, + Value: testVal, + ValueFrom: &v1.EnvVarSource{ + SecretKeyRef: &v1.SecretKeySelector{}, + }, + } + aciEnvVar := getACIEnvVar(e) + + if aciEnvVar.Value != "" { + t.Fatalf("ACI Env Variable Value should be empty for a secret") + } + + if aciEnvVar.Name != testKey { + t.Fatalf("ACI Env Variable Name does not match expected Name") + } + + if aciEnvVar.SecureValue != testVal { + t.Fatalf("ACI Env Variable Secure Value does not match expected value") + } +} + +func TestPodToACIEnvVar(t *testing.T) { + + testKey := "testVar" + testVal := "testVal" + + e := v1.EnvVar{ + Name: testKey, + Value: testVal, + ValueFrom: &v1.EnvVarSource{}, + } + aciEnvVar := getACIEnvVar(e) + + if aciEnvVar.SecureValue != "" { + t.Fatalf("ACI Env Variable Secure Value should be empty for non-secret variables") + } + + if aciEnvVar.Name != testKey { + t.Fatalf("ACI Env Variable Name does not match expected Name") + } + + if aciEnvVar.Value != testVal { + t.Fatalf("ACI Env Variable Value does not match expected value") + } +} + func prepareMocks() (*AADMock, *ACIMock, *ACIProvider, error) { aadServerMocker := NewAADMock() aciServerMocker := NewACIMock() From 
a4d8f74c7d8b7963845a63bcc1117c416d019450 Mon Sep 17 00:00:00 2001 From: Jeremy Rickard Date: Mon, 30 Jul 2018 14:38:36 -0600 Subject: [PATCH 09/21] Enabling Liveness and Readiness Probes in ACI Provider (#280) * Enabling Liveness and Readiness Probes in ACI Provider * Adding a check to ensure both exec and httpGet are not provided --- providers/azure/aci.go | 56 ++++++++++++++++ providers/azure/aci_test.go | 125 ++++++++++++++++++++++++++++++++++++ 2 files changed, 181 insertions(+) diff --git a/providers/azure/aci.go b/providers/azure/aci.go index fc13c318e..1884f94fc 100644 --- a/providers/azure/aci.go +++ b/providers/azure/aci.go @@ -722,11 +722,67 @@ func (p *ACIProvider) getContainers(pod *v1.Pod) ([]aci.Container, error) { } } + if container.LivenessProbe != nil { + probe, err := getProbe(container.LivenessProbe) + if err != nil { + return nil, err + } + c.LivenessProbe = probe + } + + if container.ReadinessProbe != nil { + probe, err := getProbe(container.ReadinessProbe) + if err != nil { + return nil, err + } + c.ReadinessProbe = probe + } + containers = append(containers, c) } return containers, nil } +func getProbe(probe *v1.Probe) (*aci.ContainerProbe, error) { + + if probe.Handler.Exec != nil && probe.Handler.HTTPGet != nil { + return nil, fmt.Errorf("probe may not specify more than one of \"exec\" and \"httpGet\"") + } + + if probe.Handler.Exec == nil && probe.Handler.HTTPGet == nil { + return nil, fmt.Errorf("probe must specify one of \"exec\" and \"httpGet\"") + } + + // Probes have can have a Exec or HTTP Get Handler. 
+ // Create those if they exist, then add to the + // ContainerProbe struct + var exec *aci.ContainerExecProbe + if probe.Handler.Exec != nil { + exec = &aci.ContainerExecProbe{ + Command: probe.Handler.Exec.Command, + } + } + + var httpGET *aci.ContainerHTTPGetProbe + if probe.Handler.HTTPGet != nil { + httpGET = &aci.ContainerHTTPGetProbe{ + Port: probe.Handler.HTTPGet.Port.IntValue(), + Path: probe.Handler.HTTPGet.Path, + Scheme: string(probe.Handler.HTTPGet.Scheme), + } + } + + return &aci.ContainerProbe{ + Exec: exec, + HTTPGet: httpGET, + InitialDelaySeconds: probe.InitialDelaySeconds, + Period: probe.PeriodSeconds, + FailureThreshold: probe.FailureThreshold, + SuccessThreshold: probe.SuccessThreshold, + TimeoutSeconds: probe.TimeoutSeconds, + }, nil +} + func (p *ACIProvider) getVolumes(pod *v1.Pod) ([]aci.Volume, error) { volumes := make([]aci.Volume, 0, len(pod.Spec.Volumes)) for _, v := range pod.Spec.Volumes { diff --git a/providers/azure/aci_test.go b/providers/azure/aci_test.go index a71e5101e..6a030c219 100644 --- a/providers/azure/aci_test.go +++ b/providers/azure/aci_test.go @@ -20,6 +20,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes/fake" ) @@ -460,3 +461,127 @@ func prepareMocks() (*AADMock, *ACIMock, *ACIProvider, error) { func ptrQuantity(q resource.Quantity) *resource.Quantity { return &q } + +func TestCreatePodWithLivenessProbe(t *testing.T) { + _, aciServerMocker, provider, err := prepareMocks() + + if err != nil { + t.Fatal("Unable to prepare the mocks", err) + } + + podName := "pod-" + uuid.New().String() + podNamespace := "ns-" + uuid.New().String() + + aciServerMocker.OnCreate = func(subscription, resourceGroup, containerGroup string, cg *aci.ContainerGroup) (int, interface{}) { + assert.Equal(t, fakeSubscription, subscription, "Subscription doesn't match") + assert.Equal(t, fakeResourceGroup, 
resourceGroup, "Resource group doesn't match") + assert.NotNil(t, cg, "Container group is nil") + assert.Equal(t, podNamespace+"-"+podName, containerGroup, "Container group name is not expected") + assert.NotNil(t, cg.ContainerGroupProperties, "Container group properties should not be nil") + assert.NotNil(t, cg.ContainerGroupProperties.Containers, "Containers should not be nil") + assert.Equal(t, 1, len(cg.ContainerGroupProperties.Containers), "1 Container is expected") + assert.Equal(t, "nginx", cg.ContainerGroupProperties.Containers[0].Name, "Container nginx is expected") + assert.NotNil(t, cg.Containers[0].LivenessProbe, "Liveness probe expected") + assert.Equal(t, cg.Containers[0].LivenessProbe.InitialDelaySeconds, 10, "Initial Probe Delay doesn't match") + assert.Equal(t, cg.Containers[0].LivenessProbe.Period, 5, "Probe Period doesn't match") + assert.Equal(t, cg.Containers[0].LivenessProbe.TimeoutSeconds, 60, "Probe Timeout doesn't match") + assert.Equal(t, cg.Containers[0].LivenessProbe.SuccessThreshold, 3, "Probe Success Threshold doesn't match") + assert.Equal(t, cg.Containers[0].LivenessProbe.FailureThreshold, 5, "Probe Failure Threshold doesn't match") + assert.NotNil(t, cg.Containers[0].LivenessProbe.HTTPGet, "Expected an HTTP Get Probe") + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: podNamespace, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Name: "nginx", + LivenessProbe: &v1.Probe{ + Handler: v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ + Port: intstr.FromString("8080"), + Path: "/", + }, + }, + InitialDelaySeconds: 10, + PeriodSeconds: 5, + TimeoutSeconds: 60, + SuccessThreshold: 3, + FailureThreshold: 5, + }, + }, + }, + }, + } + + if err := provider.CreatePod(pod); err != nil { + t.Fatal("Failed to create pod", err) + } + + return http.StatusOK, cg + } +} + +func TestCreatePodWithReadinessProbe(t *testing.T) { + _, aciServerMocker, provider, err := prepareMocks() + + if err != nil { + 
t.Fatal("Unable to prepare the mocks", err) + } + + podName := "pod-" + uuid.New().String() + podNamespace := "ns-" + uuid.New().String() + + aciServerMocker.OnCreate = func(subscription, resourceGroup, containerGroup string, cg *aci.ContainerGroup) (int, interface{}) { + assert.Equal(t, fakeSubscription, subscription, "Subscription doesn't match") + assert.Equal(t, fakeResourceGroup, resourceGroup, "Resource group doesn't match") + assert.NotNil(t, cg, "Container group is nil") + assert.Equal(t, podNamespace+"-"+podName, containerGroup, "Container group name is not expected") + assert.NotNil(t, cg.ContainerGroupProperties, "Container group properties should not be nil") + assert.NotNil(t, cg.ContainerGroupProperties.Containers, "Containers should not be nil") + assert.Equal(t, 1, len(cg.ContainerGroupProperties.Containers), "1 Container is expected") + assert.Equal(t, "nginx", cg.ContainerGroupProperties.Containers[0].Name, "Container nginx is expected") + assert.NotNil(t, cg.Containers[0].ReadinessProbe, "Readiness probe expected") + assert.Equal(t, cg.Containers[0].ReadinessProbe.InitialDelaySeconds, 10, "Initial Probe Delay doesn't match") + assert.Equal(t, cg.Containers[0].ReadinessProbe.Period, 5, "Probe Period doesn't match") + assert.Equal(t, cg.Containers[0].ReadinessProbe.TimeoutSeconds, 60, "Probe Timeout doesn't match") + assert.Equal(t, cg.Containers[0].ReadinessProbe.SuccessThreshold, 3, "Probe Success Threshold doesn't match") + assert.Equal(t, cg.Containers[0].ReadinessProbe.FailureThreshold, 5, "Probe Failure Threshold doesn't match") + assert.NotNil(t, cg.Containers[0].ReadinessProbe.HTTPGet, "Expected an HTTP Get Probe") + + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: podNamespace, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Name: "nginx", + ReadinessProbe: &v1.Probe{ + Handler: v1.Handler{ + HTTPGet: &v1.HTTPGetAction{ + Port: intstr.FromString("8080"), + Path: "/", + }, + }, + 
InitialDelaySeconds: 10, + PeriodSeconds: 5, + TimeoutSeconds: 60, + SuccessThreshold: 3, + FailureThreshold: 5, + }, + }, + }, + }, + } + + if err := provider.CreatePod(pod); err != nil { + t.Fatal("Failed to create pod", err) + } + + return http.StatusOK, cg + } +} From c3cb96d4d13a0296f4ae4627bb66c1313f2b197d Mon Sep 17 00:00:00 2001 From: Jeremy Rickard Date: Mon, 30 Jul 2018 13:51:07 -0600 Subject: [PATCH 10/21] Update VK AKS chart to have RBAC. RBAC default = true --- charts/virtual-kubelet-for-aks-0.1.5.tgz | Bin 0 -> 1946 bytes charts/virtual-kubelet-for-aks-latest.tgz | Bin 1661 -> 1946 bytes charts/virtual-kubelet-for-aks/Chart.yaml | 2 +- .../templates/clusterrolebinding.yaml | 14 ++++++++++++++ .../templates/deployment.yaml | 3 +++ .../templates/serviceaccount.yaml | 6 ++++++ charts/virtual-kubelet-for-aks/values.yaml | 8 ++++++++ 7 files changed, 32 insertions(+), 1 deletion(-) create mode 100644 charts/virtual-kubelet-for-aks-0.1.5.tgz create mode 100644 charts/virtual-kubelet-for-aks/templates/clusterrolebinding.yaml create mode 100644 charts/virtual-kubelet-for-aks/templates/serviceaccount.yaml diff --git a/charts/virtual-kubelet-for-aks-0.1.5.tgz b/charts/virtual-kubelet-for-aks-0.1.5.tgz new file mode 100644 index 0000000000000000000000000000000000000000..30a8e85ade034836d5bb32edcabc87dad281df23 GIT binary patch literal 1946 zcmV;L2W9vliwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI<9Z{kQ2p3nXjCFWMzT@A)0Avsx!bQ!|Vq2xkj$eeCZC)C&l z^rY?fbhnu>8UFjJ+Zg;p0!dcMu0(yY-R^$+t?KEj>KPR(A*_CzjF6$KkGZImTiIw& ziBR^OgzVjm#c>?xq}4J{$8pL}=kVlk^=`_Wt=8emJ9v4|J4hlG5$_!5r8wou{Y4}| z=?fYw$|DyD{1aISzTSIK9B3pJVGNc^0!t+U8Ucym?4l1`z*)NwK1-xRVOt@gks>rg zAziBu7!t}{_>x3lD1QG(NPWR2A1mAEp#>0;5MA*2XhacypOR=|r9}^Dr6qYHd^FHg zrDEwe8WXCf$w&h>O0CM@)(nL-^K|T{-Ll?#>Hie+kp0gHOHlqX1hCEiPnt(%`#(83 z+S~tY2o1>uT?;^9%q3M^%w1U7X~l#96q&fdh(f8q;)Jnl&Zs|k;f#GFb7^5T(|F{2 zBCyRUMyeaQP%OE?`MPL;5h+&cC!-ZBW#vBgFg5>Af+z91aAHhavk4FI%FxI1W1z`s z_0^Mud7SblTQY1MQ90>>yY 
z@->Vu@Nn#A#9JppNTcWM|3mh#FpSMEH?DevPTy8{>e)KB_55!-CHp@)ICS>*{~9uw zV)b~JFhQkgG=TtP#^)FSk6_Ldu|Dao0oNK#$8%6qqADO$B0(>$!r%jlByuea%uio2 zsMn*+tWNLm!S;|Lk!Y`s4HgSM28ekeq0XR2;#k+r)DmjrgfR_L0~;2L+FP&VUsbl- z|8R;dMj>q#KhqW1V*gFYDV_fZPV?YkZ~w0$_xFuoVMaq2Bq|tFhH4(;X{eXQpP~!D zHWmv@*Rncyu|DI}6piB!d~DuLv|wAydUXhJOe2I^YMk|TsW~w1G_cf$4)*8N$<#G_ zGZcab2tV{u8ToL0XkKa9PsU?<2etYd45Prjr{ii1)Qmt@eQ0W!!`Fl`I;I!^iDNTj z+xj=A=%&7cgCjF~2_y7LA`v7H5iXJuiU<|Tbe?01ERaA10;3^S80gqYO{s*RM{~0V z=U07QS2L!J(f?(`a;*6(o5kM@x_i3&+GH(bK_fMW+E23nldKi#a|!#DWqfNL+{@d^ zcK%puW_L@kVrkjd;z8(_T6qye&ywD^3Tf=)~7I}t-FX+jKYIlDDQ z_BufZi-lYHt;ocXT>$ z<~@|o+ALeS6|moFd!5Qg z{3dwl<_ZBM`U2Gj&X;FNUUt{WRBmzZf^SCHH?SMFyF2tc{oak&?hHS9y_@Up(=KII zH_qI^e%h!w_xG#drS=V_N^~?*#-F5qKo$79+wXXvJKnI}@dn$}vwXSZqfmB?;UlmY zo&P))^%mzFK|il8@O;>e?x0ZPTo(%s_luUt>K6Rb+)VlF!4C=(FL!wQ_I&TT{1~NL`~|j|HF6 z07dNs)C`<{$z=g+9}2ou)S_xG9)7^rge&cv~b^2iFP*>yn<3k(>SC74SZ zC3n@v>4idhDEjD8C4h7ReI7Q5Ppd%Yo+5{nEnun(l~Zh!5~NI%SGKg|YDbpYq8L*y zQ?1B#5F0HqN^-l(m~1*h0F=3S`HdR4Sj*#4M>c`HbHD3ERzCIeTd!{)8TyKg_2{*8 zW3NZ8V;C-7V0xq9+9P{i+3x?Riu97J;BE1L%c=PP&BNop|Nk0Nyn3{Ogvk0F0G`%8`#E#m?`59)soNO*LIO!v$*x3wvEA-|`mO5es_GdPDj}?Xn~ad5s*ky-lUvznPl-_W zoP_M%i^Xvq=cLs#Psee}Pv`LDaP@A=o2}O2$vb#?&pSvW6@L-$9OtDt<;nd;BtYp4 z8Y{{p7YO_lSqQ$~dr%x`Bo$!{mP!IkB>@@%iQw#_4_v@myAM7~q(Wg^A)%2XG(sU= zs}2|v%3S!8L|-U=|3^rD!6hFn+vlML5RnjF@c3v%5q_VNXkw*B4``(&c_Mr?&{L&i z={6b@s;0?E1AjJ3t;*ll423lFbnK?xvfg^>{}l3&{m%$XQ2sFlu+9EYnnz{(KRG$t z+y83_4ao#u3qWAZB~@I^U0B&^#e@J9nYh4+LaD#vgt2SRs6Th%jC~_>X<;x@dqADOTzyqZKP<blTQY1MQ90>>yY z@->Vu@PBaZX2e@3K}e(L?EgdduP}_wE;p`vgHGR8ck0OviIjQ=%#$QzAhxt-{~~h$M0?3(QYn zF{sz0%&boD@4@zvA(3dWj13kGJqCz*Afe8nMt|a1*UZ!sYU6}44N?Oe7K_?juj5}; zw%h-3iY!JUZ52P$71(0`O~)yn{|8R<;9zh6uOavMjbC9#Ll-0}7*mF79^+}Km&Ko= z3%@oN3rp9sI(M-?wK^Fzqz3)P@fB=hVs6 zHGg|E6oLi_KlD)<`EYz_UTN4*#$$R1wfY(iqrkkU<7x}kj6ha>Xlj_l*Mu-SrWgQ; zV>4pg`ZuQNroMuMBQtslBlJlk5hM>0E|L+72o=h7o@0tEkU#_iqajro=-5b2sf3?L zbF&8LSAAVqGp3Bu|7F8+tobUN#or9Ndw;t7+GH(bK_fMW+E23nldKi#a|!#DWqfNL 
z+{@d^cK%`NP9yv?KmvW~R)e4T<$OX@ojLn>)P z3~D*MHAVJ1K?aM3TluZX#E@M9=Q@`jtF&0>&hq^)(`UtjU)+4`Ty_S->z+4Q{RA*c zS{G_f=b+`(Dy`1i{bAedoOiAU-LuQ#Wv_iU==QFv01Z@rgZQ#xvTybmX@9a(C9vOb zyv}gYxjMTV47=x5klg$*n%RZaUVef_!2Zq0e%tF_>w)bITg(qW4Bo!%cAhZWEL*u1 zu-|EWoytc1CV1%P3IQbg0@Vf1muE>{cGt*MZgK8{Z${WRup73!JM=pJ-i_Dp3_p3j zo9pee?x0ZPTo(%s_luUt>K6Rb+)VlF!4C=zp`}zqN92|65bH zm`GitUXKNz(Evs51Jn$he#vD4Yaa@_RMetsE*^fs*MuwWtED(RKw~*YnDWR9sM&Qv z4hswzmnE1>8YOqt#_5GZc_{kmQ6+$M0ev1eh)=6P=AI&llPzGX3zbuBlMo?@b$<{WEip=RyULhsIza%Gxp?`F8n;-><55R8fxL6S>qJ&Q_3~S< zZyy=@ii`E=wR2;yN3CNRE?r=Hqu<&idtKS?|EG%dlB?it@qf#y`2WqrjLK`sJtWPK63OF@2uemjE;{m^7CMCy@M~4UF_fRFiCm%IW(}%{+lPwlKnSZPHS)f ruOYkV|8ne5$htj^CH@RKuf6PLFMHX`yLbN&00960Ms3|804@LkY(c!s delta 1618 zcmV-Y2Cey;5B&^~K7ZRd67FaHih*zo>@I3qPT~d-5a6Wr|)T{cFBx-k*5LGDzuGb2)a z3zD!m-z~oH`={-;{q=pn`s*K`9&etlzSV9YpT2?bANdKADSu7m8{hw4oa*BKBod>F zBuzCHJOBd!K$bvc&K|S?%9SRJ!OBQrWh6j35Dw>;0}v9xas zP6IF@lm+lH;~y#h{`Z7NQYbOeUL+C+fRh9R=!@}~BK$TZeCp(?uUMs{L?$D&&@-)5 z6*QYus%P2Q0Dm?sy{hld9Hp}B^u3mM;=K0O{~_dS{y!%yL-qe5fPMad(mtvB|LJM_ z;QucnG$B(A8~}-_P*e-K2w>x(#sN)^gE$_`3h)~sii2yQk5V}qzt5@GO{B#f{5zs4lBr`){m4a0$_@Ab3C z*n9uC{EGjd9v%A!|9=S?&ai&HE106zluseXl!*n#AUG^UCbu`eGZZjKNhb@?GoouC zGorw3t$)V&Ey#=qjsx~%q!~0CeC<|e4-eq=ks(p&ZJZ63OEU*ZMXaE~z$IyFTCU9! z+)2im0dc{D<6lhdc%Kp;gMXf(4ctLY?6b`lJZIIe0WnS}M{slJ zY;3h8V;a0Xu{4ej-iJKMG%b5`l#<2>UqH)@NAT|0KGI~6O(ygn+{P9Rd2FBad37Wj zMj)Fp3^gp^Q$`q_P>g}3shzRs{0nn*+g`)biJiTIF-9a)2udUfm)RI4M~y09=Y%4Q z6@L(kz-U4>#wIt?GpgXn@xt!G#r43nHH@iJjDFg&9b3Lyui|eOJve*%-mH73l5#x( z_XpMZLAho6T)}?JHomqE9_008KmX?;K!W_)<)A(Me{$6JtNj12ec=BW5t7mm>#_*m zYvkrpHRr7{YSMDtGZ=OH;eSPV zJ?x%ejjno~^I^AlT?c5Qj+!J=O`CmtAC=&Bu-|U`;b<6MpI;A0-HSR%VSUKwUMaON zF0c$Zxcz0&>346;#GVXWt`B@2ymQqJzhSoPYQVCmg-+_ik_YFS}Az*|~BH`+le4JUncI z7xyzNotbE)ia(`-?*zW-4#NJ2us`aA{oy|ItQL18l-f%%c?9+{{P(w_-r-^==!dNb zUW~f6jjbQO4H|bGyVc(iJqt{BtTW}MA}#>V1h1#On(({6Vp1! 
zba$<)&I}|{jV;z=P-RxwB1Ixeh`Ag7<5oxe@0PmdRGAiyMk>Xe#wgvl;95BIP^bd9 zZ%ejRwxW714!*#rOlXAVGRF}yTF)uNRA*K~U6%_BSYW|~s=!=Pp550w=LN;;RLs?* zD**WhMj~mFh}MB@ow9(l-haT{7HYTHE+fd9rf6)r=K6^&v&S%ILgiLb7$CJ)VvXc( zlQHeOK>)NZy#7iVu9Rlg17sL!A-9+& z&u3?qh@tq`%ufYUbZiLpJgiGxRG^B1RPe~vQ~u8izQFk13G%22_Sz?Skb@lLAkUNk Q1pom5|KLwHg8(J~0BKMxzyJUM diff --git a/charts/virtual-kubelet-for-aks/Chart.yaml b/charts/virtual-kubelet-for-aks/Chart.yaml index e88317184..75338af14 100644 --- a/charts/virtual-kubelet-for-aks/Chart.yaml +++ b/charts/virtual-kubelet-for-aks/Chart.yaml @@ -1,5 +1,5 @@ name: virtual-kubelet-for-aks -version: 0.1.4 +version: 0.1.5 description: a Helm chart to install virtual kubelet in an AKS or ACS cluster. sources: - https://github.com/virtual-kubelet/virtual-kubelet diff --git a/charts/virtual-kubelet-for-aks/templates/clusterrolebinding.yaml b/charts/virtual-kubelet-for-aks/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..620072e25 --- /dev/null +++ b/charts/virtual-kubelet-for-aks/templates/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +{{ if .Values.rbac.install }} +apiVersion: "rbac.authorization.k8s.io/{{ .Values.rbac.apiVersion }}" +kind: ClusterRoleBinding +metadata: + name: {{ template "fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Values.rbac.roleRef }} +{{ end }} \ No newline at end of file diff --git a/charts/virtual-kubelet-for-aks/templates/deployment.yaml b/charts/virtual-kubelet-for-aks/templates/deployment.yaml index e7c2269bc..9b88283b3 100644 --- a/charts/virtual-kubelet-for-aks/templates/deployment.yaml +++ b/charts/virtual-kubelet-for-aks/templates/deployment.yaml @@ -56,5 +56,8 @@ spec: hostPath: path: /etc/kubernetes/azure.json type: File + {{ if .Values.rbac.install }} + serviceAccountName: {{ template "fullname" . 
}} + {{ end }} nodeSelector: beta.kubernetes.io/os: linux \ No newline at end of file diff --git a/charts/virtual-kubelet-for-aks/templates/serviceaccount.yaml b/charts/virtual-kubelet-for-aks/templates/serviceaccount.yaml new file mode 100644 index 000000000..31eb4650d --- /dev/null +++ b/charts/virtual-kubelet-for-aks/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +{{ if .Values.rbac.install }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "fullname" . }} +{{ end }} \ No newline at end of file diff --git a/charts/virtual-kubelet-for-aks/values.yaml b/charts/virtual-kubelet-for-aks/values.yaml index 7dd131fcb..e34f51a6f 100644 --- a/charts/virtual-kubelet-for-aks/values.yaml +++ b/charts/virtual-kubelet-for-aks/values.yaml @@ -15,3 +15,11 @@ env: apiserverCert: apiserverKey: monitoredNamespace: + +# Install Default RBAC roles and bindings +rbac: + install: true + ## RBAC api version + apiVersion: v1beta1 + # Cluster role reference + roleRef: cluster-admin \ No newline at end of file From f9c7af5ec9bd787b0299804d10295bb67946920b Mon Sep 17 00:00:00 2001 From: Liang Mingqiang Date: Tue, 31 Jul 2018 16:28:42 -0400 Subject: [PATCH 11/21] read a section of config (#255) --- cmd/root.go | 5 +++++ providers/mock/mock.go | 39 +++++++++++++++++++++------------------ 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/cmd/root.go b/cmd/root.go index 7e35adde5..a7643a8d1 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -67,6 +67,11 @@ func Execute() { func init() { cobra.OnInitialize(initConfig) + // read default node name from environment variable. + // it can be overwritten by cli flags if specified. + if os.Getenv("DEFAULT_NODE_NAME") != "" { + nodeName = os.Getenv("DEFAULT_NODE_NAME") + } // Here you will define your flags and configuration settings. // Cobra supports persistent flags, which, if defined here, // will be global for your application. 
diff --git a/providers/mock/mock.go b/providers/mock/mock.go index 5bfab8ac4..f84eed032 100644 --- a/providers/mock/mock.go +++ b/providers/mock/mock.go @@ -42,7 +42,7 @@ type MockConfig struct { // NewMockProvider creates a new MockProvider func NewMockProvider(providerConfig, nodeName, operatingSystem string, internalIP string, daemonEndpointPort int32) (*MockProvider, error) { - config, err := loadConfig(providerConfig) + config, err := loadConfig(providerConfig, nodeName) if err != nil { return nil, err } @@ -59,25 +59,28 @@ func NewMockProvider(providerConfig, nodeName, operatingSystem string, internalI } // loadConfig loads the given json configuration files. -func loadConfig(providerConfig string) (config MockConfig, err error) { - if providerConfig != "" { - data, err := ioutil.ReadFile(providerConfig) - if err != nil { - return config, err + +func loadConfig(providerConfig, nodeName string) (config MockConfig, err error) { + data, err := ioutil.ReadFile(providerConfig) + if err != nil { + return config, err + } + configMap := map[string]MockConfig{} + err = json.Unmarshal(data, &configMap) + if err != nil { + return config, err + } + if _, exist := configMap[nodeName]; exist { + config = configMap[nodeName] + if config.CPU == "" { + config.CPU = defaultCPUCapacity } - err = json.Unmarshal(data, &config) - if err != nil { - return config, err + if config.Memory == "" { + config.Memory = defaultMemoryCapacity + } + if config.Pods == "" { + config.Pods = defaultPodCapacity } - } - if config.CPU == "" { - config.CPU = defaultCPUCapacity - } - if config.Memory == "" { - config.Memory = defaultMemoryCapacity - } - if config.Pods == "" { - config.Pods = defaultPodCapacity } if _, err = resource.ParseQuantity(config.CPU); err != nil { From 3f83588e599075aee7958620f62e78fdeb97152e Mon Sep 17 00:00:00 2001 From: Robbie Zhang Date: Tue, 31 Jul 2018 13:31:00 -0700 Subject: [PATCH 12/21] Reduce ACI API calls (#282) * Reduce ACI API calls Reduce reconcile calls and API 
calls in reconcile * Fix the pod status update issue * Revert a few unnecessary change --- manager/resource.go | 60 ++++++++++++++++------------ manager/resource_test.go | 6 +-- providers/azure/client/aci/delete.go | 5 --- vkubelet/vkubelet.go | 47 +++++++++++++--------- 4 files changed, 66 insertions(+), 52 deletions(-) diff --git a/manager/resource.go b/manager/resource.go index 6e7b990f0..41fb1d9fa 100644 --- a/manager/resource.go +++ b/manager/resource.go @@ -18,6 +18,7 @@ type ResourceManager struct { k8sClient kubernetes.Interface pods map[string]*v1.Pod + deletingPods map[string]*v1.Pod configMapRef map[string]int64 configMaps map[string]*v1.ConfigMap secretRef map[string]int64 @@ -28,6 +29,7 @@ type ResourceManager struct { func NewResourceManager(k8sClient kubernetes.Interface) *ResourceManager { rm := ResourceManager{ pods: make(map[string]*v1.Pod, 0), + deletingPods: make(map[string]*v1.Pod, 0), configMapRef: make(map[string]int64, 0), secretRef: make(map[string]int64, 0), configMaps: make(map[string]*v1.ConfigMap, 0), @@ -81,53 +83,52 @@ func (rm *ResourceManager) SetPods(pods *v1.PodList) { rm.secrets = make(map[string]*v1.Secret, len(pods.Items)) for k, p := range pods.Items { - if p.Status.Phase == v1.PodSucceeded { - continue - } rm.pods[rm.getStoreKey(p.Namespace, p.Name)] = &pods.Items[k] rm.incrementRefCounters(&p) } } -// AddPod adds a pod to the internal cache. -func (rm *ResourceManager) AddPod(p *v1.Pod) { - rm.Lock() - defer rm.Unlock() - if p.Status.Phase == v1.PodSucceeded { - return - } - - podKey := rm.getStoreKey(p.Namespace, p.Name) - if _, ok := rm.pods[podKey]; ok { - rm.UpdatePod(p) - return - } - - rm.pods[podKey] = p - rm.incrementRefCounters(p) -} - // UpdatePod updates the supplied pod in the cache. 
-func (rm *ResourceManager) UpdatePod(p *v1.Pod) { +func (rm *ResourceManager) UpdatePod(p *v1.Pod) bool { rm.Lock() defer rm.Unlock() podKey := rm.getStoreKey(p.Namespace, p.Name) - if p.Status.Phase == v1.PodSucceeded { - delete(rm.pods, podKey) + if p.DeletionTimestamp != nil { + if old, ok := rm.pods[podKey]; ok { + rm.deletingPods[podKey] = p + + rm.decrementRefCounters(old) + delete(rm.pods, podKey) + + return true + } + + if _, ok := rm.deletingPods[podKey]; ok { + return false + } + + return false } if old, ok := rm.pods[podKey]; ok { rm.decrementRefCounters(old) + rm.pods[podKey] = p + rm.incrementRefCounters(p) + + // NOTE(junjiez): no reconcile as we don't support update pod. + return false } - rm.incrementRefCounters(p) rm.pods[podKey] = p + rm.incrementRefCounters(p) + + return true } // DeletePod removes the pod from the cache. -func (rm *ResourceManager) DeletePod(p *v1.Pod) { +func (rm *ResourceManager) DeletePod(p *v1.Pod) bool { rm.Lock() defer rm.Unlock() @@ -135,7 +136,14 @@ func (rm *ResourceManager) DeletePod(p *v1.Pod) { if old, ok := rm.pods[podKey]; ok { rm.decrementRefCounters(old) delete(rm.pods, podKey) + return true } + + if _, ok := rm.deletingPods[podKey]; ok { + delete(rm.deletingPods, podKey) + } + + return false } // GetPod retrieves the specified pod from the cache. It returns nil if a pod is not found. 
diff --git a/manager/resource_test.go b/manager/resource_test.go index 1546a89e5..111d6fc90 100644 --- a/manager/resource_test.go +++ b/manager/resource_test.go @@ -23,7 +23,7 @@ func TestResourceManager(t *testing.T) { pod1Name := "Pod1" pod1Namespace := "Pod1Namespace" pod1 := makePod(pod1Namespace, pod1Name) - pm.AddPod(pod1) + pm.UpdatePod(pod1) pods := pm.GetPods() if len(pods) != 1 { @@ -40,7 +40,7 @@ func TestResourceManagerDeletePod(t *testing.T) { pod1Name := "Pod1" pod1Namespace := "Pod1Namespace" pod1 := makePod(pod1Namespace, pod1Name) - pm.AddPod(pod1) + pm.UpdatePod(pod1) pods := pm.GetPods() if len(pods) != 1 { t.Errorf("Got %d, expected 1 pod", len(pods)) @@ -65,7 +65,7 @@ func TestResourceManagerUpdatePod(t *testing.T) { pod1Name := "Pod1" pod1Namespace := "Pod1Namespace" pod1 := makePod(pod1Namespace, pod1Name) - pm.AddPod(pod1) + pm.UpdatePod(pod1) pods := pm.GetPods() if len(pods) != 1 { diff --git a/providers/azure/client/aci/delete.go b/providers/azure/client/aci/delete.go index 5386825f0..de23a7e7c 100644 --- a/providers/azure/client/aci/delete.go +++ b/providers/azure/client/aci/delete.go @@ -46,10 +46,5 @@ func (c *Client) DeleteContainerGroup(resourceGroup, containerGroupName string) return err } - // 204 No Content means the specified container group was not found. 
- if resp.StatusCode == http.StatusNoContent { - return fmt.Errorf("Container group with name %q was not found", containerGroupName) - } - return nil } diff --git a/vkubelet/vkubelet.go b/vkubelet/vkubelet.go index 6a36f62fe..a1f5af32c 100644 --- a/vkubelet/vkubelet.go +++ b/vkubelet/vkubelet.go @@ -251,15 +251,19 @@ func (s *Server) Run() error { } log.Println("Pod watcher event is received:", ev.Type) + reconcile := false switch ev.Type { case watch.Added: - s.resourceManager.AddPod(ev.Object.(*corev1.Pod)) + reconcile = s.resourceManager.UpdatePod(ev.Object.(*corev1.Pod)) case watch.Modified: - s.resourceManager.UpdatePod(ev.Object.(*corev1.Pod)) + reconcile = s.resourceManager.UpdatePod(ev.Object.(*corev1.Pod)) case watch.Deleted: - s.resourceManager.DeletePod(ev.Object.(*corev1.Pod)) + reconcile = s.resourceManager.DeletePod(ev.Object.(*corev1.Pod)) + } + + if reconcile { + s.reconcile() } - s.reconcile() } } @@ -310,6 +314,7 @@ func (s *Server) updateNode() { // reconcile is the main reconciliation loop that compares differences between Kubernetes and // the active provider and reconciles the differences. 
func (s *Server) reconcile() { + log.Println("Start reconcile.") providerPods, err := s.provider.GetPods() if err != nil { log.Println(err) @@ -318,7 +323,8 @@ func (s *Server) reconcile() { for _, pod := range providerPods { // Delete pods that don't exist in Kubernetes - if p := s.resourceManager.GetPod(pod.Namespace, pod.Name); p == nil { + if p := s.resourceManager.GetPod(pod.Namespace, pod.Name); p == nil || p.DeletionTimestamp != nil { + log.Printf("Deleting pod '%s'\n", pod.Name) if err := s.deletePod(pod); err != nil { log.Printf("Error deleting pod '%s': %s\n", pod.Name, err) continue @@ -329,21 +335,25 @@ func (s *Server) reconcile() { // Create any pods for k8s pods that don't exist in the provider pods := s.resourceManager.GetPods() for _, pod := range pods { - p, err := s.provider.GetPod(pod.Namespace, pod.Name) - if err != nil { - log.Printf("Error retrieving pod '%s' from provider: %s\n", pod.Name, err) + var providerPod *corev1.Pod + for _, p := range providerPods { + if p.Namespace == pod.Namespace && p.Name == pod.Name { + providerPod = p + break; + } } - if pod.DeletionTimestamp == nil && pod.Status.Phase != corev1.PodFailed && p == nil { + if pod.DeletionTimestamp == nil && pod.Status.Phase != corev1.PodFailed && providerPod == nil { + log.Printf("Creating pod '%s'\n", pod.Name) if err := s.createPod(pod); err != nil { log.Printf("Error creating pod '%s': %s\n", pod.Name, err) continue } - log.Printf("Pod '%s' created.\n", pod.Name) } - // Delete pod if DeletionTimestamp set + // Delete pod if DeletionTimestamp is set if pod.DeletionTimestamp != nil { + log.Printf("Pod '%s' is pending deletion.\n", pod.Name) var err error if err = s.deletePod(pod); err != nil { log.Printf("Error deleting pod '%s': %s\n", pod.Name, err) @@ -373,25 +383,30 @@ func (s *Server) createPod(pod *corev1.Pod) error { return origErr } + log.Printf("Pod '%s' created.\n", pod.Name) + return nil } func (s *Server) deletePod(pod *corev1.Pod) error { var delErr error if delErr 
= s.provider.DeletePod(pod); delErr != nil && errors.IsNotFound(delErr) { - return fmt.Errorf("Error deleting pod '%s': %s", pod.Name, delErr) + return delErr } if !errors.IsNotFound(delErr) { var grace int64 if err := s.k8sClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &grace}); err != nil && errors.IsNotFound(err) { if errors.IsNotFound(err) { + log.Printf("Pod '%s' doesn't exist.\n", pod.Name) return nil } return fmt.Errorf("Failed to delete kubernetes pod: %s", err) } + s.resourceManager.DeletePod(pod) + log.Printf("Pod '%s' deleted.\n", pod.Name) } @@ -403,17 +418,13 @@ func (s *Server) updatePodStatuses() { // Update all the pods with the provider status. pods := s.resourceManager.GetPods() for _, pod := range pods { - if pod.DeletionTimestamp != nil && pod.Status.Phase == corev1.PodSucceeded { - continue - } - - if pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == PodStatusReason_ProviderFailed { + if pod.Status.Phase == corev1.PodSucceeded || (pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == PodStatusReason_ProviderFailed) { continue } status, err := s.provider.GetPodStatus(pod.Namespace, pod.Name) if err != nil { - log.Printf("Error retrieving pod '%s' status from provider: %s\n", pod.Name, err) + log.Printf("Error retrieving pod '%s' in namespace '%s' status from provider: %s\n", pod.Name, pod.Namespace, err) return } From 36db5d9583d117d1a8cfdfd990a87f03c112fa08 Mon Sep 17 00:00:00 2001 From: yaron2 Date: Tue, 31 Jul 2018 16:00:56 -0700 Subject: [PATCH 13/21] added Service Fabric Mesh provider --- README.md | 14 + providers/sfmesh/README.md | 64 +++ providers/sfmesh/sfmesh.go | 850 +++++++++++++++++++++++++++++++++++++ vkubelet/provider.go | 2 + vkubelet/vkubelet.go | 18 +- 5 files changed, 942 insertions(+), 6 deletions(-) create mode 100644 providers/sfmesh/README.md create mode 100644 providers/sfmesh/sfmesh.go diff --git a/README.md b/README.md index f03fedab2..40c50ec8d 100644 
--- a/README.md +++ b/README.md @@ -23,6 +23,7 @@ The best description is "Kubernetes API on top, programmable back." + [Azure Batch GPU Provider](./providers/azurebatch/README.md) + [AWS Fargate Provider](#aws-fargate-provider) + [Hyper.sh Provider](#hypersh-provider) + + [Service Fabric Mesh Provider](#service-fabric-mesh-provider) + [Adding a New Provider via the Provider Interface](#adding-a-new-provider-via-the-provider-interface) * [Testing](#testing) + [Testing the Azure Provider Client](#testing-the-azure-provider-client) @@ -143,6 +144,19 @@ Kubernetes cluster. ./bin/virtual-kubelet --provider hyper ``` +### Service Fabric Mesh Provider + +The Service Fabric Mesh Provider allows you to deploy pods to Azure [Service Fabric Mesh](https://docs.microsoft.com/en-us/azure/service-fabric-mesh/service-fabric-mesh-overview). + +Service Fabric Mesh is a fully managed service that lets developers deploy microservices without managing the underlying infrastructure. + Pods deployed to Service Fabric Mesh will be assigned Public IPs from the Service Fabric Mesh network. + +``` +./bin/virtual-kubelet --provider sfmesh --taint azure.com/sfmesh +``` + +More detailed instructions can be found [here](providers/sfmesh/README.md). + ### Adding a New Provider via the Provider Interface The structure we chose allows you to have all the power of the Kubernetes API diff --git a/providers/sfmesh/README.md b/providers/sfmesh/README.md new file mode 100644 index 000000000..df6c3e699 --- /dev/null +++ b/providers/sfmesh/README.md @@ -0,0 +1,64 @@ +# Kubernetes Virtual Kubelet with Service Fabric Mesh + +[Service Fabric Mesh](https://docs.microsoft.com/en-us/azure/service-fabric-mesh/service-fabric-mesh-overview) is a fully managed service that enables developers to deploy microservices applications without managing virtual machines, storage, or networking. Applications hosted on Service Fabric Mesh run and scale without you worrying about the infrastructure powering it. 
+ +The Virtual kubelet integration allows you to use the Kubernetes API to burst out compute to a Service Fabric Mesh cluster and schedule pods as Mesh Applications. + +## Status: Experimental + +This provider is currently in the experimental stages. Contributions welcome! + +## Setup + +The provider expects the following environment variables to be configured: + +- AZURE_CLIENT_ID +- AZURE_CLIENT_SECRET +- AZURE_SUBSCRIPTION_ID +- AZURE_TENANT_ID +- RESOURCE_GROUP +- REGION + +## Quick Start + +#### Run the Virtual Kubelet + +``` +./virtual-kubelet --provider=sfmesh --taint azure.com/sfmesh +``` + +#### Create pod yaml: + +``` +$ cat pod-nginx +apiVersion: v1 +kind: Pod +metadata: + name: nginx +spec: + nodeName: virtual-kubelet + containers: + - name: nginx + image: nginx:latest + ports: + - containerPort: 80 + tolerations: + - key: azure.com/sfmesh + effect: NoSchedule +``` + +#### create pod + +``` +$ kubectl create -f pod-nginx +``` + +#### list containers on Service Fabric Mesh + +``` +$ az mesh app list -o table + +Name ResourceGroup ProvisioningState Location +------ --------------- ------------------- ---------- +nginx myResourceGroup Succeeded eastus +``` diff --git a/providers/sfmesh/sfmesh.go b/providers/sfmesh/sfmesh.go new file mode 100644 index 000000000..f551cad6e --- /dev/null +++ b/providers/sfmesh/sfmesh.go @@ -0,0 +1,850 @@ +package sfmesh + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "strconv" + "time" + + "github.com/Azure/azure-sdk-for-go/profiles/preview/preview/servicefabricmesh/mgmt/servicefabricmesh" + "github.com/Azure/go-autorest/autorest/azure/auth" + "github.com/virtual-kubelet/virtual-kubelet/manager" + "github.com/virtual-kubelet/virtual-kubelet/providers" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/remotecommand" +) + +const ( + defaultCPUCapacity = "60" + 
defaultMemoryCapacity = "48Gi" + defaultPodCapacity = "5" + defaultCPURequests = 1.0 + defaultMemoryRequests = 1.0 + defaultCPULimit = 4.0 + defaultMemoryLimit = 16.0 +) + +// SFMeshProvider implements the Virtual Kubelet provider interface +type SFMeshProvider struct { + nodeName string + operatingSystem string + internalIP string + daemonEndpointPort int32 + appClient *servicefabricmesh.ApplicationClient + networkClient *servicefabricmesh.NetworkClient + serviceClient *servicefabricmesh.ServiceClient + region string + resourceGroup string + resourceManager *manager.ResourceManager +} + +// AuthConfig is the secret returned from an ImageRegistryCredential +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + Email string `json:"email,omitempty"` + ServerAddress string `json:"serveraddress,omitempty"` + IdentityToken string `json:"identitytoken,omitempty"` + RegistryToken string `json:"registrytoken,omitempty"` +} + +// NewSFMeshProvider creates a new SFMeshProvider +func NewSFMeshProvider(rm *manager.ResourceManager, nodeName, operatingSystem string, internalIP string, daemonEndpointPort int32) (*SFMeshProvider, error) { + azureSubscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID") + azureTenantID := os.Getenv("AZURE_TENANT_ID") + azureClientID := os.Getenv("AZURE_CLIENT_ID") + azureClientSecret := os.Getenv("AZURE_CLIENT_SECRET") + region := os.Getenv("REGION") + resourceGroup := os.Getenv("RESOURCE_GROUP") + + if azureSubscriptionID == "" { + return nil, errors.New("Subscription ID cannot be empty, please set AZURE_SUBSCRIPTION_ID") + } + if azureTenantID == "" { + return nil, errors.New("Tenant ID cannot be empty, please set AZURE_TENANT_ID") + } + if azureClientID == "" { + return nil, errors.New("Client ID cannot be empty, please set AZURE_CLIENT_ID ") + } + if azureClientSecret == "" { + return nil, errors.New("Client Secret cannot be empty, please set 
AZURE_CLIENT_SECRET ") + } + if region == "" { + return nil, errors.New("Region cannot be empty, please set REGION ") + } + if resourceGroup == "" { + return nil, errors.New("Resource Group cannot be empty, please set RESOURCE_GROUP ") + } + + client := servicefabricmesh.NewApplicationClient(azureSubscriptionID) + + auth, err := auth.NewAuthorizerFromEnvironment() + if err != nil { + return nil, err + } + + client.Authorizer = auth + + networkClient := servicefabricmesh.NewNetworkClient(azureSubscriptionID) + networkClient.Authorizer = auth + + serviceClient := servicefabricmesh.NewServiceClient(azureSubscriptionID) + serviceClient.Authorizer = auth + + provider := SFMeshProvider{ + nodeName: nodeName, + operatingSystem: operatingSystem, + internalIP: internalIP, + daemonEndpointPort: daemonEndpointPort, + appClient: &client, + networkClient: &networkClient, + serviceClient: &serviceClient, + region: region, + resourceGroup: resourceGroup, + resourceManager: rm, + } + return &provider, nil +} + +func readDockerCfgSecret(secret *v1.Secret, ips []servicefabricmesh.ImageRegistryCredential) ([]servicefabricmesh.ImageRegistryCredential, error) { + var err error + var authConfigs map[string]AuthConfig + repoData, ok := secret.Data[string(v1.DockerConfigKey)] + + if !ok { + return ips, fmt.Errorf("no dockercfg present in secret") + } + + err = json.Unmarshal(repoData, &authConfigs) + if err != nil { + return ips, err + } + + for server, authConfig := range authConfigs { + ips = append(ips, servicefabricmesh.ImageRegistryCredential{ + Password: &authConfig.Password, + Server: &server, + Username: &authConfig.Username, + }) + } + + return ips, err +} + +func readDockerConfigJSONSecret(secret *v1.Secret, ips []servicefabricmesh.ImageRegistryCredential) ([]servicefabricmesh.ImageRegistryCredential, error) { + var err error + repoData, ok := secret.Data[string(v1.DockerConfigJsonKey)] + + if !ok { + return ips, fmt.Errorf("no dockerconfigjson present in secret") + } + + var 
authConfigs map[string]map[string]AuthConfig + + err = json.Unmarshal(repoData, &authConfigs) + if err != nil { + return ips, err + } + + auths, ok := authConfigs["auths"] + + if !ok { + return ips, fmt.Errorf("malformed dockerconfigjson in secret") + } + + for server, authConfig := range auths { + ips = append(ips, servicefabricmesh.ImageRegistryCredential{ + Password: &authConfig.Password, + Server: &server, + Username: &authConfig.Username, + }) + } + + return ips, err +} + +func (p *SFMeshProvider) getImagePullSecrets(pod *v1.Pod) ([]servicefabricmesh.ImageRegistryCredential, error) { + ips := make([]servicefabricmesh.ImageRegistryCredential, 0, len(pod.Spec.ImagePullSecrets)) + for _, ref := range pod.Spec.ImagePullSecrets { + secret, err := p.resourceManager.GetSecret(ref.Name, pod.Namespace) + if err != nil { + return ips, err + } + if secret == nil { + return nil, fmt.Errorf("error getting image pull secret") + } + + switch secret.Type { + case v1.SecretTypeDockercfg: + ips, err = readDockerCfgSecret(secret, ips) + case v1.SecretTypeDockerConfigJson: + ips, err = readDockerConfigJSONSecret(secret, ips) + default: + return nil, fmt.Errorf("image pull secret type is not one of kubernetes.io/dockercfg or kubernetes.io/dockerconfigjson") + } + + if err != nil { + return ips, err + } + + } + return ips, nil +} + +func (p *SFMeshProvider) getMeshApplication(pod *v1.Pod) (servicefabricmesh.ApplicationResourceDescription, error) { + meshApp := servicefabricmesh.ApplicationResourceDescription{} + meshApp.Name = &pod.Name + meshApp.Location = &p.region + + podUID := string(pod.UID) + podCreationTimestamp := pod.CreationTimestamp.String() + + tags := map[string]*string{ + "PodName": &pod.Name, + "ClusterName": &pod.ClusterName, + "NodeName": &pod.Spec.NodeName, + "Namespace": &pod.Namespace, + "UID": &podUID, + "CreationTimestamp": &podCreationTimestamp, + } + + meshApp.Tags = tags + + properties := servicefabricmesh.ApplicationResourceProperties{} + 
meshApp.ApplicationResourceProperties = &properties + + services := []servicefabricmesh.ServiceResourceDescription{} + service := servicefabricmesh.ServiceResourceDescription{} + serviceName := *meshApp.Name + "-service" + service.Name = &serviceName + serviceType := "Microsoft.ServiceFabricMesh/services" + service.Type = &serviceType + + creds, err := p.getImagePullSecrets(pod) + if err != nil { + return meshApp, err + } + + codePackages := []servicefabricmesh.ContainerCodePackageProperties{} + + for _, container := range pod.Spec.Containers { + codePackage := servicefabricmesh.ContainerCodePackageProperties{} + codePackage.Image = &container.Image + codePackage.Name = &container.Name + + if creds != nil { + if len(creds) > 0 { + // Mesh ImageRegistryCredential supports only a single credential + codePackage.ImageRegistryCredential = &creds[0] + } + } + + requirements := servicefabricmesh.ResourceRequirements{} + requests := servicefabricmesh.ResourceRequests{} + + cpuRequest := defaultCPURequests + memoryRequest := defaultMemoryRequests + + if container.Resources.Requests != nil { + if _, ok := container.Resources.Requests[v1.ResourceCPU]; ok { + containerCPURequest := float64(container.Resources.Requests.Cpu().MilliValue()/10.00) / 100.00 + if containerCPURequest > 1 && containerCPURequest <= 4 { + cpuRequest = containerCPURequest + } + } + + if _, ok := container.Resources.Requests[v1.ResourceMemory]; ok { + containerMemoryRequest := float64(container.Resources.Requests.Memory().Value()/100000000.00) / 10.00 + if containerMemoryRequest < 0.10 { + containerMemoryRequest = 0.10 + } + + containerMemoryRequest = 1 + memoryRequest = containerMemoryRequest + } + } + + requests.CPU = &cpuRequest + requests.MemoryInGB = &memoryRequest + + requirements.Requests = &requests + + if container.Resources.Limits != nil { + cpuLimit := defaultCPULimit + memoryLimit := defaultMemoryLimit + + limits := servicefabricmesh.ResourceLimits{} + limits.CPU = &cpuLimit + 
limits.MemoryInGB = &memoryLimit + + if _, ok := container.Resources.Limits[v1.ResourceCPU]; ok { + containerCPULimit := float64(container.Resources.Limits.Cpu().MilliValue()) / 1000.00 + if containerCPULimit > 1 { + limits.CPU = &containerCPULimit + } + } + + if _, ok := container.Resources.Limits[v1.ResourceMemory]; ok { + containerMemoryLimit := float64(container.Resources.Limits.Memory().Value()) / 1000000000.00 + if containerMemoryLimit < 0.10 { + containerMemoryLimit = 0.10 + } + + limits.MemoryInGB = &containerMemoryLimit + } + + requirements.Limits = &limits + } + + codePackage.Resources = &requirements + + if len(container.Command) > 0 { + codePackage.Commands = &container.Command + } + + if len(container.Env) > 0 { + envVars := []servicefabricmesh.EnvironmentVariable{} + + for _, envVar := range container.Env { + env := servicefabricmesh.EnvironmentVariable{} + env.Name = &envVar.Name + env.Value = &envVar.Value + + envVars = append(envVars, env) + } + + codePackage.EnvironmentVariables = &envVars + } + + endpoints := []servicefabricmesh.EndpointProperties{} + + for _, port := range container.Ports { + endpoint := p.getEndpointFromContainerPort(port) + endpoints = append(endpoints, endpoint) + } + + if len(endpoints) > 0 { + codePackage.Endpoints = &endpoints + } + + codePackages = append(codePackages, codePackage) + } + + serviceProperties := servicefabricmesh.ServiceResourceProperties{} + serviceProperties.OsType = servicefabricmesh.Linux + replicaCount := int32(1) + serviceProperties.ReplicaCount = &replicaCount + serviceProperties.CodePackages = &codePackages + service.ServiceResourceProperties = &serviceProperties + services = append(services, service) + properties.Services = &services + + return meshApp, nil +} + +func (p *SFMeshProvider) getMeshNetwork(pod *v1.Pod, meshApp servicefabricmesh.ApplicationResourceDescription, location string) servicefabricmesh.NetworkResourceDescription { + network := servicefabricmesh.NetworkResourceDescription{} + 
network.Name = meshApp.Name + network.Location = &location + + networkProperties := servicefabricmesh.NetworkResourceProperties{} + addressPrefix := "10.0.0.4/22" + networkProperties.AddressPrefix = &addressPrefix + + layers := []servicefabricmesh.Layer4IngressConfig{} + + service := (*meshApp.Services)[0] + + for _, codePackage := range *service.CodePackages { + for _, endpoint := range *codePackage.Endpoints { + layer := p.getLayer(&endpoint, *meshApp.Name, *service.Name) + layers = append(layers, layer) + } + } + + ingressConfig := servicefabricmesh.IngressConfig{} + ingressConfig.Layer4 = &layers + + networkProperties.IngressConfig = &ingressConfig + network.NetworkResourceProperties = &networkProperties + + return network +} + +func (p *SFMeshProvider) getLayer(endpoint *servicefabricmesh.EndpointProperties, appName string, serviceName string) servicefabricmesh.Layer4IngressConfig { + layer := servicefabricmesh.Layer4IngressConfig{} + name := *endpoint.Name + "Ingress" + layerName := &name + layer.Name = layerName + layer.PublicPort = endpoint.Port + layer.EndpointName = endpoint.Name + layer.ApplicationName = &appName + layer.ServiceName = &serviceName + + return layer +} + +func (p *SFMeshProvider) getEndpointFromContainerPort(port v1.ContainerPort) servicefabricmesh.EndpointProperties { + endpoint := servicefabricmesh.EndpointProperties{} + endpointName := strconv.Itoa(int(port.ContainerPort)) + "Listener" + endpoint.Name = &endpointName + endpoint.Port = &port.ContainerPort + + return endpoint +} + +// CreatePod accepts a Pod definition and creates a SF Mesh App. 
+func (p *SFMeshProvider) CreatePod(pod *v1.Pod) error { + log.Printf("receive CreatePod %q\n", pod.Name) + + meshApp, err := p.getMeshApplication(pod) + if err != nil { + return err + } + + meshNetwork := p.getMeshNetwork(pod, meshApp, p.region) + _, err = p.networkClient.Create(context.Background(), p.resourceGroup, *meshNetwork.Name, meshNetwork) + if err != nil { + return err + } + + networkName := *meshNetwork.Name + resourceID := "/subscriptions/" + os.Getenv("AZURE_SUBSCRIPTION_ID") + "/resourceGroups/" + p.resourceGroup + "/providers/Microsoft.ServiceFabricMesh/networks/" + networkName + + service := (*meshApp.Services)[0] + + networkRef := servicefabricmesh.NetworkRef{} + networkRef.Name = &resourceID + + networkRefs := []servicefabricmesh.NetworkRef{} + networkRefs = append(networkRefs, networkRef) + + service.NetworkRefs = &networkRefs + + _, err = p.appClient.Create(context.Background(), p.resourceGroup, pod.Name, meshApp) + if err != nil { + return err + } + + return nil +} + +// UpdatePod updates the pod running inside SF Mesh. +func (p *SFMeshProvider) UpdatePod(pod *v1.Pod) error { + log.Printf("receive UpdatePod %q\n", pod.Name) + + app, err := p.getMeshApplication(pod) + if err != nil { + return err + } + + _, err = p.appClient.Create(context.Background(), p.resourceGroup, pod.Name, app) + if err != nil { + return err + } + + return nil +} + +// DeletePod deletes the specified pod out of SF Mesh. +func (p *SFMeshProvider) DeletePod(pod *v1.Pod) (err error) { + log.Printf("receive DeletePod %q\n", pod.Name) + + _, err = p.appClient.Delete(context.Background(), p.resourceGroup, pod.Name) + if err != nil { + return err + } + + return nil +} + +// GetPod returns a pod by name that is running inside SF Mesh. +// returns nil if a pod by that name is not found. 
+func (p *SFMeshProvider) GetPod(namespace, name string) (pod *v1.Pod, err error) { + log.Printf("receive GetPod %q\n", name) + + resp, err := p.appClient.Get(context.Background(), p.resourceGroup, name) + httpResponse := resp.Response.Response + + if err != nil { + if httpResponse.StatusCode == 404 { + return nil, nil + } + return nil, err + } + + if resp.Tags == nil { + return nil, nil + } + + val, present := resp.Tags["NodeName"] + + if !present { + return nil, nil + } + + if *val != p.nodeName { + return nil, nil + } + + pod, err = p.applicationDescriptionToPod(resp) + if err != nil { + return nil, err + } + + return pod, nil +} + +func (p *SFMeshProvider) appStateToPodPhase(state string) v1.PodPhase { + switch state { + case "Succeeded": + return v1.PodRunning + case "Failed": + return v1.PodFailed + case "Canceled": + return v1.PodFailed + case "Creating": + return v1.PodPending + case "Updating": + return v1.PodPending + } + + return v1.PodUnknown +} + +func (p *SFMeshProvider) appStateToPodConditions(state string, transitiontime metav1.Time) []v1.PodCondition { + switch state { + case "Succeeded": + return []v1.PodCondition{ + v1.PodCondition{ + Type: v1.PodReady, + Status: v1.ConditionTrue, + LastTransitionTime: transitiontime, + }, v1.PodCondition{ + Type: v1.PodInitialized, + Status: v1.ConditionTrue, + LastTransitionTime: transitiontime, + }, v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + LastTransitionTime: transitiontime, + }, + } + } + return []v1.PodCondition{} +} + +func (p *SFMeshProvider) getMeshService(appName string, serviceName string) (servicefabricmesh.ServiceResourceDescription, error) { + svc, err := p.serviceClient.Get(context.Background(), p.resourceGroup, appName, serviceName) + if err != nil { + return servicefabricmesh.ServiceResourceDescription{}, err + } + + return svc, err +} + +func appStateToContainerState(state string, appStartTime metav1.Time) v1.ContainerState { + if state == "Succeeded" { + return 
v1.ContainerState{ + Running: &v1.ContainerStateRunning{ + StartedAt: appStartTime, + }, + } + } + + if state == "Failed" || state == "Canceled" { + return v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "", + Message: "", + StartedAt: appStartTime, + FinishedAt: metav1.NewTime(time.Now()), + }, + } + } + + return v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "", + Message: "", + }, + } +} + +func (p *SFMeshProvider) getMeshNetworkPublicIP(networkName string) (*string, error) { + network, err := p.networkClient.Get(context.Background(), p.resourceGroup, networkName) + if err != nil { + return nil, err + } + + ipAddress := network.IngressConfig.PublicIPAddress + return ipAddress, nil +} + +func (p *SFMeshProvider) applicationDescriptionToPod(app servicefabricmesh.ApplicationResourceDescription) (*v1.Pod, error) { + var podCreationTimestamp metav1.Time + + if *app.Tags["CreationTimestamp"] != "" { + t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", *app.Tags["CreationTimestamp"]) + if err != nil { + return nil, err + } + podCreationTimestamp = metav1.NewTime(t) + } + + containerStartTime := podCreationTimestamp + + appState := app.ProvisioningState + podPhase := p.appStateToPodPhase(*appState) + podConditions := p.appStateToPodConditions(*appState, podCreationTimestamp) + + service, err := p.getMeshService(*app.Name, (*app.ServiceNames)[0]) + + containers := []v1.Container{} + containerStatuses := []v1.ContainerStatus{} + + for _, codePkg := range *service.CodePackages { + container := v1.Container{} + container.Name = *codePkg.Name + container.Image = *codePkg.Image + + if codePkg.Commands != nil { + container.Command = *codePkg.Commands + } + + container.Resources = v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", *codePkg.Resources.Requests.CPU)), + v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%gG", 
*codePkg.Resources.Requests.MemoryInGB)), + }, + } + + if codePkg.Resources.Limits != nil { + container.Resources.Limits = v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%g", *codePkg.Resources.Limits.CPU)), + v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%gG", *codePkg.Resources.Limits.MemoryInGB)), + } + } + + containerStatus := v1.ContainerStatus{ + Name: *codePkg.Name, + State: appStateToContainerState(*appState, podCreationTimestamp), + Ready: podPhase == v1.PodRunning, + Image: container.Image, + ImageID: "", + ContainerID: "", + } + + containerStatuses = append(containerStatuses, containerStatus) + containers = append(containers, container) + } + + appName := app.Name + ipAddress := "" + meshIP, err := p.getMeshNetworkPublicIP(*appName) + if err != nil { + return nil, err + } + + if meshIP != nil { + ipAddress = *meshIP + } + + pod := v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: *app.Tags["PodName"], + Namespace: *app.Tags["Namespace"], + ClusterName: *app.Tags["ClusterName"], + UID: types.UID(*app.Tags["UID"]), + CreationTimestamp: podCreationTimestamp, + }, + Spec: v1.PodSpec{ + NodeName: *app.Tags["NodeName"], + Volumes: []v1.Volume{}, + Containers: containers, + }, + Status: v1.PodStatus{ + Phase: podPhase, + Conditions: podConditions, + Message: "", + Reason: "", + HostIP: "", + PodIP: ipAddress, + StartTime: &containerStartTime, + ContainerStatuses: containerStatuses, + }, + } + + return &pod, nil +} + +// GetContainerLogs retrieves the logs of a container by name. 
+func (p *SFMeshProvider) GetContainerLogs(namespace, podName, containerName string, tail int) (string, error) { + log.Printf("receive GetContainerLogs %q\n", podName) + return "", nil +} + +// GetPodFullName gets the full pod name as defined in the provider context +func (p *SFMeshProvider) GetPodFullName(namespace string, pod string) string { + return "" +} + +// ExecInContainer executes a command in a container in the pod, copying data +// between in/out/err and the container's stdin/stdout/stderr. +func (p *SFMeshProvider) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { + log.Printf("receive ExecInContainer %q\n", container) + return nil +} + +// GetPodStatus returns the status of a pod by name that is "running". +// returns nil if a pod by that name is not found. +func (p *SFMeshProvider) GetPodStatus(namespace, name string) (*v1.PodStatus, error) { + pod, err := p.GetPod(namespace, name) + if err != nil { + return nil, err + } + + if pod == nil { + return nil, nil + } + + return &pod.Status, nil +} + +// GetPods returns a list of all pods known to be running within SF Mesh. +func (p *SFMeshProvider) GetPods() ([]*v1.Pod, error) { + log.Printf("receive GetPods\n") + + var pods []*v1.Pod + + list, err := p.appClient.ListByResourceGroup(context.Background(), p.resourceGroup) + if err != nil { + return pods, err + } + + apps := list.Values() + + for _, app := range apps { + if app.Tags == nil { + continue + } + + val, present := app.Tags["NodeName"] + + if !present { + continue + } + + if *val != p.nodeName { + continue + } + + pod, err := p.applicationDescriptionToPod(app) + if err != nil { + return pods, err + } + + pods = append(pods, pod) + } + + return pods, nil +} + +// Capacity returns a resource list containing the capacity limits set for SF Mesh. 
+func (p *SFMeshProvider) Capacity() v1.ResourceList { + return v1.ResourceList{ + "cpu": resource.MustParse(defaultCPUCapacity), + "memory": resource.MustParse(defaultMemoryCapacity), + "pods": resource.MustParse(defaultPodCapacity), + } +} + +// NodeConditions returns a list of conditions (Ready, OutOfDisk, etc), for updates to the node status +// within Kubernetes. +func (p *SFMeshProvider) NodeConditions() []v1.NodeCondition { + // TODO: Make this configurable + return []v1.NodeCondition{ + { + Type: "Ready", + Status: v1.ConditionTrue, + LastHeartbeatTime: metav1.Now(), + LastTransitionTime: metav1.Now(), + Reason: "KubeletReady", + Message: "kubelet is ready.", + }, + { + Type: "OutOfDisk", + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Now(), + LastTransitionTime: metav1.Now(), + Reason: "KubeletHasSufficientDisk", + Message: "kubelet has sufficient disk space available", + }, + { + Type: "MemoryPressure", + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Now(), + LastTransitionTime: metav1.Now(), + Reason: "KubeletHasSufficientMemory", + Message: "kubelet has sufficient memory available", + }, + { + Type: "DiskPressure", + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Now(), + LastTransitionTime: metav1.Now(), + Reason: "KubeletHasNoDiskPressure", + Message: "kubelet has no disk pressure", + }, + { + Type: "NetworkUnavailable", + Status: v1.ConditionFalse, + LastHeartbeatTime: metav1.Now(), + LastTransitionTime: metav1.Now(), + Reason: "RouteCreated", + Message: "RouteController created a route", + }, + } + +} + +// NodeAddresses returns a list of addresses for the node status +// within Kubernetes. +func (p *SFMeshProvider) NodeAddresses() []v1.NodeAddress { + return []v1.NodeAddress{ + { + Type: "InternalIP", + Address: p.internalIP, + }, + } +} + +// NodeDaemonEndpoints returns NodeDaemonEndpoints for the node status +// within Kubernetes. 
+func (p *SFMeshProvider) NodeDaemonEndpoints() *v1.NodeDaemonEndpoints { + return &v1.NodeDaemonEndpoints{ + KubeletEndpoint: v1.DaemonEndpoint{ + Port: p.daemonEndpointPort, + }, + } +} + +// OperatingSystem returns the operating system for this provider. +// This is a noop to default to Linux for now. +func (p *SFMeshProvider) OperatingSystem() string { + return providers.OperatingSystemLinux +} diff --git a/vkubelet/provider.go b/vkubelet/provider.go index 0df690437..081157b58 100644 --- a/vkubelet/provider.go +++ b/vkubelet/provider.go @@ -11,6 +11,7 @@ import ( "github.com/virtual-kubelet/virtual-kubelet/providers/huawei" "github.com/virtual-kubelet/virtual-kubelet/providers/hypersh" "github.com/virtual-kubelet/virtual-kubelet/providers/mock" + "github.com/virtual-kubelet/virtual-kubelet/providers/sfmesh" "github.com/virtual-kubelet/virtual-kubelet/providers/vic" "github.com/virtual-kubelet/virtual-kubelet/providers/web" "k8s.io/api/core/v1" @@ -28,6 +29,7 @@ var _ Provider = (*mock.MockProvider)(nil) var _ Provider = (*huawei.CCIProvider)(nil) var _ Provider = (*azurebatch.Provider)(nil) var _ Provider = (*cri.CRIProvider)(nil) +var _ Provider = (*sfmesh.SFMeshProvider)(nil) // Provider contains the methods required to implement a virtual-kubelet provider. 
type Provider interface { diff --git a/vkubelet/vkubelet.go b/vkubelet/vkubelet.go index a1f5af32c..09e20174d 100644 --- a/vkubelet/vkubelet.go +++ b/vkubelet/vkubelet.go @@ -18,6 +18,7 @@ import ( "github.com/virtual-kubelet/virtual-kubelet/providers/huawei" "github.com/virtual-kubelet/virtual-kubelet/providers/hypersh" "github.com/virtual-kubelet/virtual-kubelet/providers/mock" + "github.com/virtual-kubelet/virtual-kubelet/providers/sfmesh" "github.com/virtual-kubelet/virtual-kubelet/providers/vic" "github.com/virtual-kubelet/virtual-kubelet/providers/web" corev1 "k8s.io/api/core/v1" @@ -31,7 +32,7 @@ import ( ) const ( - PodStatusReason_ProviderFailed = "ProviderFailed" + PodStatusReason_ProviderFailed = "ProviderFailed" ) // Server masquarades itself as a kubelet and allows for the virtual node to be backed by non-vm/node providers. @@ -127,6 +128,11 @@ func New(nodeName, operatingSystem, namespace, kubeConfig, taint, provider, prov if err != nil { return nil, err } + case "sfmesh": + p, err = sfmesh.NewSFMeshProvider(rm, nodeName, operatingSystem, internalIP, daemonEndpointPort) + if err != nil { + return nil, err + } default: fmt.Printf("Provider '%s' is not supported\n", provider) } @@ -236,7 +242,7 @@ func (s *Server) Run() error { log.Fatal("Failed to watch pods", err) } - loop: + loop: for { select { case ev, ok := <-s.podWatcher.ResultChan(): @@ -289,10 +295,10 @@ func (s *Server) updateNode() { } if errors.IsNotFound(err) { - if err = s.registerNode(); err != nil { + if err = s.registerNode(); err != nil { log.Println("Failed to register node:", err) - return - } + return + } } n.ResourceVersion = "" // Blank out resource version to prevent object has been modified error @@ -339,7 +345,7 @@ func (s *Server) reconcile() { for _, p := range providerPods { if p.Namespace == pod.Namespace && p.Name == pod.Name { providerPod = p - break; + break } } From ebdb9257a0492aa15f289120583f7d52b15dca44 Mon Sep 17 00:00:00 2001 From: yaron2 Date: Wed, 1 Aug 2018 
15:17:00 -0700 Subject: [PATCH 14/21] spaces fix --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 40c50ec8d..95cf4d659 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ The best description is "Kubernetes API on top, programmable back." + [Azure Batch GPU Provider](./providers/azurebatch/README.md) + [AWS Fargate Provider](#aws-fargate-provider) + [Hyper.sh Provider](#hypersh-provider) - + [Service Fabric Mesh Provider](#service-fabric-mesh-provider) + + [Service Fabric Mesh Provider](#service-fabric-mesh-provider) + [Adding a New Provider via the Provider Interface](#adding-a-new-provider-via-the-provider-interface) * [Testing](#testing) + [Testing the Azure Provider Client](#testing-the-azure-provider-client) From fdb2c29ea3a2b50217350783cfe8eb289a32749b Mon Sep 17 00:00:00 2001 From: yaron2 Date: Wed, 1 Aug 2018 16:23:22 -0700 Subject: [PATCH 15/21] removed redundant memory assignment --- providers/sfmesh/sfmesh.go | 1 - 1 file changed, 1 deletion(-) diff --git a/providers/sfmesh/sfmesh.go b/providers/sfmesh/sfmesh.go index f551cad6e..b9b3b0c5b 100644 --- a/providers/sfmesh/sfmesh.go +++ b/providers/sfmesh/sfmesh.go @@ -268,7 +268,6 @@ func (p *SFMeshProvider) getMeshApplication(pod *v1.Pod) (servicefabricmesh.Appl containerMemoryRequest = 0.10 } - containerMemoryRequest = 1 memoryRequest = containerMemoryRequest } } From 1ee747cb6f291017a84793637cc328d646a5eae8 Mon Sep 17 00:00:00 2001 From: yaron2 Date: Wed, 1 Aug 2018 16:25:47 -0700 Subject: [PATCH 16/21] assigned subscriptionID to provider --- providers/sfmesh/sfmesh.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/providers/sfmesh/sfmesh.go b/providers/sfmesh/sfmesh.go index b9b3b0c5b..8d656a2d6 100644 --- a/providers/sfmesh/sfmesh.go +++ b/providers/sfmesh/sfmesh.go @@ -43,6 +43,7 @@ type SFMeshProvider struct { serviceClient *servicefabricmesh.ServiceClient region string resourceGroup string + subscriptionID string 
resourceManager *manager.ResourceManager } @@ -111,6 +112,7 @@ func NewSFMeshProvider(rm *manager.ResourceManager, nodeName, operatingSystem st region: region, resourceGroup: resourceGroup, resourceManager: rm, + subscriptionID: azureSubscriptionID, } return &provider, nil } @@ -417,7 +419,7 @@ func (p *SFMeshProvider) CreatePod(pod *v1.Pod) error { } networkName := *meshNetwork.Name - resourceID := "/subscriptions/" + os.Getenv("AZURE_SUBSCRIPTION_ID") + "/resourceGroups/" + p.resourceGroup + "/providers/Microsoft.ServiceFabricMesh/networks/" + networkName + resourceID := "/subscriptions/" + p.subscriptionID + "/resourceGroups/" + p.resourceGroup + "/providers/Microsoft.ServiceFabricMesh/networks/" + networkName service := (*meshApp.Services)[0] From bf014c1c7e84886315ad4b68a5f0f32360cbfd40 Mon Sep 17 00:00:00 2001 From: Jeremy Rickard Date: Wed, 1 Aug 2018 17:54:17 -0600 Subject: [PATCH 17/21] Add certificate generation to Helm charts (#286) Signed-off-by: Jeremy Rickard --- charts/virtual-kubelet-0.1.3.tgz | Bin 0 -> 2609 bytes charts/virtual-kubelet-for-aks-0.1.6.tgz | Bin 0 -> 2157 bytes charts/virtual-kubelet-for-aks-latest.tgz | Bin 1946 -> 2157 bytes charts/virtual-kubelet-for-aks/Chart.yaml | 2 +- .../templates/NOTES.txt | 9 ++++++++- .../templates/secrets.yaml | 16 +++++++++++++--- charts/virtual-kubelet-latest.tgz | Bin 2284 -> 2609 bytes charts/virtual-kubelet/Chart.yaml | 2 +- charts/virtual-kubelet/templates/NOTES.txt | 7 +++++++ charts/virtual-kubelet/templates/secrets.yaml | 14 ++++++++++++-- 10 files changed, 42 insertions(+), 8 deletions(-) create mode 100644 charts/virtual-kubelet-0.1.3.tgz create mode 100644 charts/virtual-kubelet-for-aks-0.1.6.tgz diff --git a/charts/virtual-kubelet-0.1.3.tgz b/charts/virtual-kubelet-0.1.3.tgz new file mode 100644 index 0000000000000000000000000000000000000000..7ec58eb1f08d1b3da95dab0c889b1cd65414e704 GIT binary patch literal 2609 zcmV-13eNQ(iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc 
zVQyr3R8em|NM&qo0PI?OZ`(K$-@o-K2HIVqS=5T1R}aE=ajD~Ljihm4J6&|Ihk%yG zHXDjmNy@1=^>;szl59Pk*EUGISo5DaBV?$z+fyR7HzyJM z;n7O7*=)Yp-?x98&1Us)bNAq2`PvcxTPdY6Zu24`L#IJ$^A`Ah$@gY z)>LpG2)scSL14}RXaSTfO&EjhkU(}wOg#+|2plIPlpHlG2w0*tO3#T1<(g2AQu$5; zFd~%s@G;>ZDgOEIhz3$9G1gunA_stz2z}^_(TF1aF(rKBq*axJ=_rxN04?NH>sa~Q z+Y_p%$;be0SC4VE_Y9@7Q*3%IZ`b+OtN#6z+QJc*pn6;axWoQit>%8!{`YoXZ0!Fj z3XRAFeFs2dEELs3&V5)KY0ZuRG@1Coh(=}pixbAq1f#*+heLKn=E^}nGkE0BL}Hs! zQ#YPlo(XK0vc`Cm^B z=W*IdVyaNiP_|L(a!syTB)C}&3=N`+Nr1k?#Ds9d=9&h|Oc6O5F${ee6Q|Y0$Poz)K*qnT0gR3`z{4OA z3D>7f&{!-SXE?>W>#1OZT2nrO5Mw6h z7=qw17l~ZCpw3Vj=A+{|=qb@PkSS4Mwq0ZR9Av_M#{t_IXa^|;F35tlDX-CJ5CrgATD^YSh$X3RvDQ>JFg!fwOBZSPQTyl`|w^Q zKqNv$l`?B2@l!%2hLHMoKsji$sw*!K#+0E4rYzweQ9EZir7GL12)TwU${36gC=Wyw zGt>wP*OY;|7>#aAmB$oWc%#ZNB{PH(B8OOt84WQ^H$I6&qH(pa08{g*N#cnlAwqRa z&2E0MVvUAE4Ykw%^4Ish^M2#-kMn+~ebVim4!cK1kjq9>>@hfhHE8#{XTxsqwA`;^OnIkvx8E7`&in1o>wfS2Ebn1Eq|VXHUr#7c zK7aY0@=#o<-<}&s#LK*pm%mys_$!}+cfJ3b&ArO~ zf3SbBv$_ADqFi5Z{{k}_`C!P;5UVys5Se8Ors%^j+lz%`q@2#@*jTV3V{^HI7~9`A zTI4yITmwQJQ;y)K7hq#mYo9z`8ep`{o6~@i?9EV08X|lFEfXHV!LI#Dqd_tr)6d{G zR$#^s8!7Q@|KhYInp>AF$1vCL96lw4(J{pkNE};@J?D2!(XG6Oojt3)f)NHJQ3y&z zSPW{}d1m*JxmFpCsG$xmKpQf7Hk#WtI655|S##s67=x`f%dzqa@>%?3(fybAUz@yV zEGgGxaDP^fpOss=&r;ZjEaSI6gPZd0%FXz%hz2etHRATdO)DPhX{ zs06P=x4Rd4G~vRA6Y9u?39pUu6((JVeZ5xE>+7YX>|UYLiSc}^_!IMBUk7~F9d!Ed zI{k}wr$2n~a-h^+jL|Ky$DQ{NMZLs%3AB6_7n4n1mT6iyvylA*tEyrMR967( zjP2JJB%pO5n{6%NKO zbe2~s*OEZf86Ht-&2--3VJs-u=aQvz`PXDjOepu3pS}prC{O7w#gUow@|JKiL3txi zWZUh#X~mdpRe3E*Jk5QgTwg*44yHnbYZ4Towm{i zHS2D%a!=p#RJQ3B+p~C9CiC0tBF8w-HA=Jnb;$Ji+<02*1KiS~G#n4}pZ9u0)O`Lj zF@JCipRyI`v`~MH^HqZ}@nzn=*cYfOB=m(W4eYL0wAg)XEv}jQ*8rRTTgA15e{2@y zHvi9hznSyDc zVQyr3R8em|NM&qo0PI?AZ`(K$?q~gqfx8Q|i&~bGI6ep{aIWL+2FWFX?e=hcI0Upb zw%JgmN>Xm!)c^fJ%Chz1#7<8 z+MSb#z4&4BJkM*lTK2E!dF8*}Vf%3P+m1I|t;6;U`0H1|TGKx&sJUqBKg^i3sJIP>xdh zP8~2Jl=<)};h!je`)5Q0DU=v%HxQ8nz)6HY^u=gI5&oPKK5^2aJG9bKB9Q@F=&9DR z@*9l_)zf5T02`%Qo?G;P3A)SvXM`oFevSj!X8-NxQQ7|6?W4W@ 
zKLyc%oaNY2!^qh!@soCFG)-l${EToN?jbub`}Y4gn^+!R51zAcbu1Sy7Y)AI3@|x(0|wIf)orD5FWya z@{sb0a^#2vMhZ(0>VuXEIsh+Urc+2vVd;+2)jwvwY1SN}CQZW&-PEnx1wi5$CGx<) zn1Mcy{mk*!Nf=T7nESuW{xwFiz2(Nm<*+w!^;i999ouIA$8E1<|LudL=HC9Ff`(J9 zhIa)M)SB`Mgcvh1#}EXExk%(X(mO+8G??gNx2?F?N?R*Rn>~_a|!zpF+R5r z?&KCe3No6)7ytugXR&bdg-Ol6 zB#E91Nxu=3mAE(mP%ae>lfvZ@*3)cic0+mSXT?lkFnpKlkrSaNA<@KN^@8q6@f?+m zK4GA3_a}uMpq$YFJ3$~4u6F`SdD4Oo?n*tKtiDQXvt_|~=lM-yTRvibGX>c!p~3p# zpPbA+g0|a#S^`AKAKeb#V*f`+2M6W+@3?iixBsW0yoKXejokcHjjRii>h;M=1s-a$ zF$T-#5~Bf8J~SOzv70vlc|=2ujF749$rxa~&7=Xcg$xB^-GWX>>N`1xl+uP6)N*!f zhun383>FK&@>-LLb#?`ut6Y1mGGpC4%dfv|pA|QL_TgRcyf?hQy6g{EF9B?m)`wct zJ7{^e%B)UzaNX_qPI?!^)6V(z`DM2=JiWZA0yIzu4HBq^?Y_C6lsD?B9Q3;V zUgaWwmm>0Wg8&KxiTVuZ%UF`9-8C|$8=U(P*c}cm?1t+;z3%r0mmm7w-u3(b<%g^7 z$1ZhMH=f+WzTc=gx3{a}rS=7tPE0jY#-F5lKo$7vbkOU6?Dem^z5Z~UdX_)#1Squ| zV{{MfS?@m&MZLlKM$nII3p}|#tq8Vy^|ol-aP4;dis-2iN)w$ZHx^+Ia3)w1;lE5i zqpE%&kQgG@lrWXUjSP2+S=)4~s%Id9s;{x`gDQH3HBum=h?v;$-&%RK|E*oOoG8Ol zuP@tXvi1gQ7S8<0bpdN{3c6I(qG~T5zQgB4Xp^g@I3h%AIYpTA&I+j6=Ykv-STLbV zFz1vfU#pYT4+`b2n9!q30I2{25j99at3dWUMGhwuV5$q1D7HxnQcsgtwshoTM=!HQ zF{VPLT9I2IwpwD9;U=Xe%FJna_Z%?k?#N*23pAV z?oD#zu6J!}7@>S%=AxfFBYRrfp8uzc^dp~wxAlLI+U5G+Yqs|F|C3Ph)1#Bs)i%L+ zSgxNM`DIK#C+K9;e(Fb=6z-RRfd3XI$h)12i?0Wy28YKjFVD{nf39a*p0ir0+)75%M6#b}gReS>{nymNT4ZI|5`1H$bLg@J?;1E^yC;!g4%J7t1Bl z{yk|&@&6Uzdv=g@pnh)S{Ry;-|F?0Nq;dE$VPISTx7lu%{C~6Mwf6r1DQNfpZ!S6% jx~>n45`TrB=brYor#Xm!)c^fJ%Chz1#7<8 z+MSb#z4&4BJkM*lTK2E!dF8*}Vf%3P+m1I|t;6;U`0+CL=pa+5DCC8&Om1K4K&?dDP0{@d-N zz5PE0(TGgYcK{^DLQyT`+=rE&R!j&$lZg+EXjJAYP8hoqj0STbI_wLXD+l?^;E`{M z#4e-A^=arsapVl=>!BfXQe+w=qm@_6#sk{N)coH|k$=SN!HG3#<4r_}7uJ0&ABM&m zt)4E`a2}^GNlX>W8Okn7T^z}F771>IfuTWEF$vIjoR@IA^oS=oCJEEff7j`P6buy* z9>R$7kn)Lg5kIXKW4sZ)*PWGO~VV_)UDbDK;jrB z^1#5Dfqy=Z{mk*!Nf=T7nESuW{xwFiz2(Nm<*+w!^;i999ouIA$8E1<|LudL=HC9F zf`(J9hIa)M)SB`Mgcvh1#}EXExk%(X(mO+8G?@khQ-2hE`&C|Vt77)8=ON-s01@BmSRRj3}GxJ zd_03chQljhRB7ZWUFfX2zYrrvj*{pl`&=Y045u_dzGA1!;;V#eGGb^a#8QMwV7wqC 
z3V+@7jL1V+EPmGgZ@2&JDY6))a&`R3RA7t!H$ATu{|~(8!NK1CpMq|08^6JfMm{Li zFs2OkJjSJbH>YaR)KBziqYPI!nGfggB-gp_UqFeLZRe zrkfU)#?V(5XgDutC?yRMzMD|V12{gkKYwX7NXBFO6>9Z0m@xtKJKa}TqVWi1wT5vG zbNHMPM#mIGAaQJW>^lF(6y5S`I5@JqS1`hWBnm-^2;nRlq2#DhrRN+|WT65g5g3iA zv05!a8#{hAnp+v1Tnr4a@t7*c;PnP^to>DXieD_c|8{rTWHVz)xgJC9S5^O2)qjfa za|!zpF+R5r?&KCe3No6)7ytug zXR&bdg-Ol6B#E91Nxu=3mAE(mP%ae>lfvZ@*3)cic0+mSXT?lkFnpKlkrSaNA<@KN z^@8q6@f?+mK4GA3_a}uMpq$YFJAXkS60UawNqN$O4(>`lovgk}YqMp+dFS~}Vp~39 zelrExETO^r;Gdk#J%YB|e_8@W$RFJf-eUhpM+XPx{O`DRxVQhOpuC0SSB>2KRE?|) zk?QryN(CNjvM~nB<`Sa;Q9d*sSh1To0C_}1jEs<}?8z8lyv?KmvV{x+b%P8R3%~MOlZkb91)Qr~d#o~J-8;*#zigisH-7fvUGKa%yuP~Z z4_7Y%Y?Ic9TGKmdd9}){PIqwK?e|W47sJ!e`Stl_w=+Dwyr=>+PzMbXsD|ynxu2Bn zN|nI={Lt@R4|^A#i{bU@Nq-e2cRpKE=UaRE0~P@XAKneR{nIP6v7KRy{lUAzyXU99 z2kbWMR&E6x^t%0Ci9S$t)hU-4P?)L_l zANt+i_51$ihpX+!E_GEmp4`H|->5jZx2xi%_63zrOf^!*pQL#}6@U2ZbkOU6?Dem^ zz5Z~UdX_)#1Squ|V{{MfS?@m&MZLlKM$nII3p}|#tq8Vy^|ol-aP4;dis-2iN)w$Z zHx^+Ia3)w1;lE5iqpE%&kQgG@lrWXUjSP2+S=)4~s%Id9s;{x`gDQH3HBum=h?v;$ z-&%RK|E*oOoG8OluYWJwX0rAMY8KA?$aMj0Zwk6p)S_xH9=^lpL}-(%r8pu)YdJ-j z^3DpV+2?{B7FaN$N-*b?Cts_R(+>*et(efGO8}_=0}(YyK&wFZJ4FsB6JV+fl_<7J z2~tm!SGIKIVn;8tMKPvArCO0&Ahud!mE>mCG1(MB0JQz^@_#$EX|bXEtB&jd`Q(1r zgRFAu<+G9R02u~a$o1|`a^tRdZE6^yd|>9HpF1OaTH2ofr;79=pMtmbe~;Sb`rm7| z_VxdhQ1R2FlhxHW!FX7%pBnjPOg<;*WYd1?N0}7vmwTK@PqpnaMdh$&>|*fT8eH!Cmcf~ zd@CPL8N6at>D)%htcs7@*u@_EuQ6B=22CaGn{8T0%Qs|K&D~v zPHm|!aLn45`TrB=brYor#8UFjJ+Zg;p0!dcMu0(yY-R^$+t?KEj>KPR(A*_CzjF6$KkGZImTiIw& ziBR^OgzVjm#c>?xq}4J{$8pL}=kVlk^=`_Wt=8emJ9v4|JAX(b6%p?o=cPF1$^Atn zKcUS2)^EXP#kC^6=4jPN&-tI0U7~`;OwFgT)=b;4h7Xr(22B78K^ zQ>9|*HX0MErhmyu12#&n%HP%ug*5YY?55qa-g@c(6!MV$&j?FU{xJlw&HhiCM`imz zIXT+f|7!>h$pl>sKw!)zRb0$nSlMaCga8zoxWI@)slVccv1`t#KX>7beIs*eVKmct ze)KB_55!-CHp@)ICS>* z{~9uwV)b~JFhQkgG=TtP#^)FSk6_Ldu|Dao0oNK#$8%6qqADO$B0(>$!r%jlByuea z%uio2sMn*+tWNLm!S;|Lk!Y`s4HgSM28ekep?}VxM&el4%+wNU)D(^54t#9hO|)QJ%X)PPa7-hFT56p2b*VWp?KH5|h7R`U 
z)PKp;HG4A@f(8gb^idi4aC~T9Y1mK3V|oX*`Wg(Qz`Up9Y75kiKvsQdYM8^T*OzJh}zGkOUl^hqKSBo7fTk`ams70Ps;V~Q-0Km-D#AypXY*ho#O zgr7%qvj*o^eO*^Gri{`5Wy5l;`6`>m-+v6cd%FADWG!PsBQ=KFPqO}#tQF~V3Hy{~ zd}|%t%iGCz{$HN>+T~ojw@-}wiB0c|N^ob zDrrItYB{?#MfN&D28)GT`K`#rkX-@iI+q@+v{>iP^8GK{gJ+v{HIf$a=i%nv>c-oEU1o-o=h zTe%gm-)VcD%0~Pqc+RDnWmPxM+`xX?sDC*3_p9Kg_6?;v~b^2iFP z*>yn<3k(>SC74SZC3n@v>4idhDEjD8C4h7ReI7Q5Ppd%Yo+5{nEnun(l~Zh!5~NI% zSGKg|YDbpYq8L*yQ-7_(Dwc=?SQw^+;LQAaj`ymPia z@>{QO9~t_Ji}mQWb7QYZtz#H2U0`~n-`XR4UD@vcr;7BFtKe<%f6J-(|INeWz5o9j zQoMS!vbfqf7*F%{QzP%iM9{jub89 z0zD7w0_P>Dyd&j4a`Y7Mtl30{tP*~>QzErDClUMM z(Mq$~Y`)mvw||?>X7z7#_uydp+t#=CcABjp;PEuLr6fud`F}&R`L#IJ$^A`Ah$@gY z)>LpG2)scSL14}RXaSTfO&EjhkU(}wOg#+|2plIPlpHlG2w0*tO3#T1<(g2AQu$5; zFd~%s@G;>ZDgOEIhz3$9G1gunA_stz2z}^_(TF1aF(rKBq*axJ=_rxN04?NH>sa~Q z+Y_p%$;be0SAUOjwf79AvQun&EpONP)~o*gl-j}(mY{lE0=UEeTdn4P)&BQ(UTp0D zDGH6q1bqiUVk{KZLe70y8fndr05qBSz=%d={)-dF&IF^u+=oMUMdr#uJ~Mda&qQLI zQRKQC`cSSp#`&shh@6y#2FYm2N_l-i`r-4)6jo)*aj&WDj+#rW;4*uWSJJ_x8|Ne{S?uP%L zqFi4$fQ~Kwc<<7?ANkBHvG+|a$2hMyf&{!-SXE?>W>#1OZT2nrO z5Mw6h7=qw17l~ZCpw3Vj=A+{|=qb@PkSS4Mwq0ZR9Av_M#{t_IXa^|;F35tlDX-CJ5CrgATD^YSh$X3RvDQ>JFg!fwOBZSPQTyl z`+x9WBtRrWM3pjYB=J*1C5DjtbwD|2v#KjE55|L!xoDuK-i?s7d09 zBq2g|OU-V6uwspdLJhUk|MJ)Oz4LzK@PCi?C{I3r`JM7mT&drl8%M;;ypWf_S}){_mI?lr9cdndG1QDio^heeW?l5NwtpZDztIah9?8K#5u%-N-(|hZHBRs@a`Di42NgHsM5$$dT_Xsz9NPtI7*^3 z@wrGmut#xKzZ9p+;;V#eAF$1vCL96lw4(J{pkNE};@J?D2!(SNPHhMhgD zy@C-2BvA-TL|6=J*?DI7khxYFji{jxEkGMGc{ZBcH8?sQ7+G`Usu+W5PN!2g!#Qp23MMt0;Jm5e@Opl#eg#UGSnVE01I!e$Lp>XSzG za`tud*j5@B%YuwIPTD*etJ5v&Qd-dr8Z7UAUtZ+R_P=_weQh@24uAXK-9Knn{J$5w zt^JMtKSe28IDXd1%}-S^>7(UQc$g_W#u>_z3ycOt`OtD;Del{6cM;JPMn=d~{+}_% z`1V1JP(F|;fmr1q)0)OTRLzG=6C&UiY}i(Ls{{!w7JjW+lZlP>lyJ@p>5*EGRqmuX zewIEdD}H?bs&mpAUVohR`om=-q%jd6+*WgEzvz%hz z2etHRATdO)DPhX{s06P=x4Rd4G~vRA6Y9u?39pUu6((JVeZ5xE>+7YX>|UYLiSc}^ z_!IMBUk7~F9d!EdI{k}wr$2n~a-h^+jL|Ky$DQ{NMZLs%34gSF6&I6DUY2QEH?x!8 z>x;wF!;|;JZhP>THoG)ZX}6b`Rc+LFs}p&2(XDO%f24@~LM5b+sO);J)t?bP^+9Q( 
z6XnGsEC9{~OCo%0u5DGfi^4fA;6_F|#dO!rcx5Sph$3Pb?!RtT=6@>!k`rYl8jUOh zCEe%X+UT75QGbXRyU$Cfw0u768AbR4pAw-F7K;=|glHM53{%ytgqkNk3s_*mgsQ;U z&|bZB8u_cLVhB`M0PKwI*A^t8bs(E^q%a!EN9rwpDks&rwdOP#jT1vTq#v2st}@>I6z7TdFURwnb?>mtWE&oxT3{dLIn_}q9} z>VE^=(xEgQ5A&b*dPCHF{xUIta0{QZ73j23e~j~0gE8@C-oDrus467%g)9y1u2;0! zeQPbQnfccMoBmtHwS#|b7UVYn&w9U^^ZPY!;9c?momMse-)!wQH~!yKl>5j33*KQV m0ROPyel-;Ty~V>fWm7g~Q~oLC{{a91{|Wyd>M}k6H~;_=vM5Uc delta 2248 zcmV;(2sih!6zma@Jb!C%+qlx+&-xVu?OUK-#EP9;UkFWcQpcw?(#C=9bkRLM2xw_+ zv!zItq@22`|N8+c$=1cWZG&_d>-iFgmxsg4JTn|>Mx{;&YkW>d$WU*$r$lORP9pZp zgGsa5Y(C%Lw||?>X7z7#_uydp+m^TXcABkU;Ndj5K@z2j{C}m{{860h7`B}a`40+uL^(sLq0xh9mORKC*y zj0j~ud`kEyir@Yf(Lf3%#@Y)+r( zdqVXz85yAM>VGk=_MV|sc8X1}d)5CsQd>B}5>yXM0JqqGtJ!K+?SF4)XJh}5 zAv7Wr^c?_+u~1YCIrm{{q%}JN&}8BRBN~uO<>dkGW6A-qrAXpc;eW*1wV@Ib;;H41?4O}wqUC=* zHJrz3C5fp*IYZe-smmp~W|81#F)%cUDkg#H>HG@aj1rG=OcJJ{|LUj>QZQ6NcnBlP zL&_)0ks}hAWymPhhcRIa9RM>74e%%kM8fsyTCM~5H3cRyg^ar3X^=>XT(ddMS|ik? z^(Z$>BY`M#OpZb2}Gq1$n*VUZhyk3>FdA+90 z>gD3>!GKD32IIG1VW6Nm}3Zn!+%^PautF)Lt$u>j_07KMAty3M1k3M zjo}u^g!_&IwldHR8V#OP{^8XXczt9@6naZX0E>mu0b&s3};<7&9O) zc(7Qwj$>9CnL<0SUmsK~96+bv@AZB7AQB)FA)-o|HIn!xp%Oz#<2s-mv{}_vlm}zV z(0>CXOSlKr&KXXr%C;&(uHlk01|tN@0};gxHA2EQWnjFj(QT>nm?8_WRT-vahA=|p z5KA$mA%^M3CviwLuJ#pRYA!WNJdq?ssBWp*&5yQOqoGhk?erhM_|QA=H;(=|?|0g7 zyPeZv_qg$<)9<|8s@p@o@k!@HwRLVH#ec@b&gs!QfG-;w|OL*YCem9*Rr#a?3<9UKYX}zOW(8cP$fq znH^~!!eX(B9sb60z5lz~o~>DqRZNi2;{PnVe|Yz`$$Q3g5j(5+&B?x5)w`PWy1f4>r%sed zVW4f^KgA!E5@>fq%yMoGQtFds^)mK#^2Au0RLhc#*G}487^~AQ>QY+K3>qx&W>NFc z-uO(exBu0h?MGh&Zn6K}-G5d!{(HXL+Svc2NYTRat43~qs)|V;FOS0gOxZEPQ1-aM zXh4(?EeDp(eS7ULA{t?2giPiC8DorZ9>g@t2Qm_f)%#;w(zt`F`Ecoj2)G3swng6R zfdm!{zgDfu#O8WRIA?|Dky?+{+evZ!?D?du_{sUJ&fCuL;;h#nE`KW_&58KnwwgQp zO}Ey|rU@>N&WCR<-uBu@!*1`i-gz5!Ptu5@orHK4OA*2_S%yreu&xoG-pfu5m@?sGd0B3?F5xz4n zQPs~};S3aTBhxy?bl1&lbqWI!MZ~bjf81)D|E`Q%PLyfUXk285*GhgOMAs6dz zl}7gh@>T_cVxzO@0K?PkCsC38j`uN42-hz z%(Mr{FwjD-v^HMJ`@FoMo9I(0ADCJH%{}%{$o27GdORNDQ;^&8f6sSX)%@QTQ!id5PACQ3O>3)_R8|n_2HxYM16F(mZhm0 
z|Lc?N&-!1cub|Gl6i-Rv7lU^OP03Fzb0d1LbCWJA(VftBpP3P$P!Pd+;5E3g&!@`B9OGo#vI~a6NU%S`8Hb1*Z>l2*ct$Fzw z?QVVYXS!$b9YzOx$b*6hHfKAF3hkL*0S?dE@Vv=-{ma9{3TpAQqi1EXEEv2$mR*7Z z)IIJ0CPDtU9y)W-KjH>%%m3}Ps`,env.azureClientKey=,env.azureTenantId=,env.azureSubscriptionId=,env.aciResourceGroup=,ev.aciOsType=,rbac.install= {{- end }} + +{{- if (not .Values.env.apiserverCert) and (not .Values.env.apiserverKey) }} + +Note: +TLS key pair not provided for VK HTTP listener. A key pair was generated for you. This generated key pair is not suitable for production use. + +{{- end }} \ No newline at end of file diff --git a/charts/virtual-kubelet/templates/secrets.yaml b/charts/virtual-kubelet/templates/secrets.yaml index bb25d6d74..2231caf6c 100644 --- a/charts/virtual-kubelet/templates/secrets.yaml +++ b/charts/virtual-kubelet/templates/secrets.yaml @@ -5,8 +5,18 @@ metadata: type: Opaque data: credentials.json: {{ printf "{ \"clientId\": \"%s\", \"clientSecret\": \"%s\", \"subscriptionId\": \"%s\", \"tenantId\": \"%s\", \"activeDirectoryEndpointUrl\": \"https://login.microsoftonline.com/\", \"resourceManagerEndpointUrl\": \"https://management.azure.com/\", \"activeDirectoryGraphResourceId\": \"https://graph.windows.net/\", \"sqlManagementEndpointUrl\": \"database.windows.net\", \"galleryEndpointUrl\": \"https://gallery.azure.com/\", \"managementEndpointUrl\": \"https://management.core.windows.net/\" }" (default "MISSING" .Values.env.azureClientId) (default "MISSING" .Values.env.azureClientKey) (default "MISSING" .Values.env.azureSubscriptionId) (default "MISSING" .Values.env.azureTenantId) | b64enc | quote }} - cert.pem: {{ (default "TUlTU0lORw==" .Values.env.apiserverCert) | quote }} - key.pem: {{ (default "TUlTU0lORw==" .Values.env.apiserverKey) | quote }} + {{- if (not .Values.env.apiserverCert) and (not .Values.env.apiserverKey) }} + {{- $ca := genCA "virtual-kubelet-ca" 3650 }} + {{- $cn := printf "%s-virtual-kubelet-apiserver" .Release.Name }} + {{- $altName1 := printf 
"%s-virtual-kubelet-apiserver.%s" .Release.Name .Release.Namespace }} + {{- $altName2 := printf "%s-virtual-kubelet-apiserver.%s.svc" .Release.Name .Release.Namespace }} + {{- $cert := genSignedCert $cn nil (list $altName1 $altName2) 3650 $ca }} + cert.pem: {{ b64enc $cert.Cert }} + key.pem: {{ b64enc $cert.Key }} + {{ else }} + cert.pem: {{ quote .Values.env.apiserverCert }} + key.pem: {{ quote .Values.env.apiserverKey }} + {{ end}} {{ if .Values.loganalytics.enabled }} loganalytics.json: {{ printf "{\"workspaceID\": \"%s\",\"workspaceKey\": \"%s\"}" (required "workspaceID is required for loganalytics" .Values.loganalytics.workspaceID ) (required "workspaceKey is required for loganalytics" .Values.loganalytics.workspaceKey ) }} {{ end }} From fac77abdfa32164f98234dad632fe65ac61dd329 Mon Sep 17 00:00:00 2001 From: yaron2 Date: Wed, 1 Aug 2018 17:23:49 -0700 Subject: [PATCH 18/21] added basic tests --- providers/sfmesh/sfmesh_test.go | 68 +++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 providers/sfmesh/sfmesh_test.go diff --git a/providers/sfmesh/sfmesh_test.go b/providers/sfmesh/sfmesh_test.go new file mode 100644 index 000000000..b446ad126 --- /dev/null +++ b/providers/sfmesh/sfmesh_test.go @@ -0,0 +1,68 @@ +package sfmesh + +import ( + "errors" + "os" + "testing" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func setEnvVars() { + os.Setenv("AZURE_SUBSCRIPTION_ID", "fake") + os.Setenv("AZURE_TENANT_ID", "fake") + os.Setenv("AZURE_CLIENT_ID", "fake") + os.Setenv("AZURE_CLIENT_SECRET", "fake") + os.Setenv("REGION", "fake") + os.Setenv("RESOURCE_GROUP", "fake") +} + +func Test_podToMeshApp(t *testing.T) { + setEnvVars() + + pod := &v1.Pod{} + pod.ObjectMeta = metav1.ObjectMeta{ + Name: "test-pod", + } + pod.Spec = v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "testcontainer", + Image: "nginx", + Ports: []v1.ContainerPort{ + { + Name: "http", + ContainerPort: 80, + }, + }, + }, + }, + } + + 
provider, err := NewSFMeshProvider(nil, "testnode", "Linux", "6.6.6.6", 80) + if err != nil { + t.Error(err.Error()) + } + + _, err = provider.getMeshApplication(pod) + if err != nil { + t.Error(err.Error()) + } +} + +func Test_meshStateToPodCondition(t *testing.T) { + setEnvVars() + + meshStateSucceeded := "Succeeded" + + provider, err := NewSFMeshProvider(nil, "testnode", "Linux", "6.6.6.6", 80) + if err != nil { + t.Error(err.Error()) + } + + phase := provider.appStateToPodPhase(meshStateSucceeded) + if phase != v1.PodRunning { + t.Error(errors.New("PodRunning phase expected")) + } +} From 4efc61d1ae359149d7534857a21d788d0e44d6e4 Mon Sep 17 00:00:00 2001 From: Onur Filiz Date: Fri, 3 Aug 2018 11:48:27 -0700 Subject: [PATCH 19/21] Fargate: Add stubs for new provider APIs --- providers/aws/config.go | 2 +- providers/aws/provider.go | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/providers/aws/config.go b/providers/aws/config.go index 77ea9f04c..45a9ac94f 100644 --- a/providers/aws/config.go +++ b/providers/aws/config.go @@ -63,7 +63,7 @@ func (p *FargateProvider) loadConfigFile(filePath string) error { return err } -// loadConfigStream loads the given Fargate provider TOML configuration stream. +// loadConfig loads the given Fargate provider TOML configuration stream. func (p *FargateProvider) loadConfig(r io.Reader) error { var config providerConfig var q resource.Quantity diff --git a/providers/aws/provider.go b/providers/aws/provider.go index ffe9411b4..ed76f5621 100644 --- a/providers/aws/provider.go +++ b/providers/aws/provider.go @@ -178,17 +178,18 @@ func (p *FargateProvider) GetContainerLogs(namespace, podName, containerName str return p.cluster.GetContainerLogs(namespace, podName, containerName, tail) } -// Get full pod name as defined in the provider context +// GetPodFullName retrieves the full pod name as defined in the provider context. 
func (p *FargateProvider) GetPodFullName(namespace string, pod string) string { return "" } // ExecInContainer executes a command in a container in the pod, copying data // between in/out/err and the container's stdin/stdout/stderr. -// TODO: Implementation -func (p *FargateProvider) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - log.Printf("receive ExecInContainer %q\n", container) - return nil +func (p *FargateProvider) ExecInContainer( + name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, + tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { + log.Printf("Received ExecInContainer request for %s.\n", container) + return errNotImplemented } // GetPodStatus retrieves the status of a pod by name from the provider. From 9e55d2ee9803bc0d3935f65247ea29ca9cbcbf9c Mon Sep 17 00:00:00 2001 From: Onur Filiz Date: Fri, 3 Aug 2018 11:49:29 -0700 Subject: [PATCH 20/21] Fargate: Add environment variable support --- providers/aws/fargate/container.go | 12 ++++++++++++ providers/aws/fargate/container_test.go | 15 ++++++++++++++- providers/aws/provider_test.go | 22 +++++++++++++++++++--- 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/providers/aws/fargate/container.go b/providers/aws/fargate/container.go index de9ccb9b3..5e16a853c 100644 --- a/providers/aws/fargate/container.go +++ b/providers/aws/fargate/container.go @@ -52,6 +52,18 @@ func newContainer(spec *corev1.Container) (*container, error) { cntr.definition.WorkingDirectory = aws.String(spec.WorkingDir) } + // Add environment variables. 
+ if spec.Env != nil { + for _, env := range spec.Env { + cntr.definition.Environment = append( + cntr.definition.Environment, + &ecs.KeyValuePair{ + Name: aws.String(env.Name), + Value: aws.String(env.Value), + }) + } + } + // Translate the Kubernetes container resource requirements to Fargate units. cntr.setResourceRequirements(&spec.Resources) diff --git a/providers/aws/fargate/container_test.go b/providers/aws/fargate/container_test.go index a563049db..5700885d3 100644 --- a/providers/aws/fargate/container_test.go +++ b/providers/aws/fargate/container_test.go @@ -32,6 +32,10 @@ var ( Command: []string{"anyCmd"}, Args: []string{"anyArg1", "anyArg2"}, WorkingDir: "/any/working/dir", + Env: []corev1.EnvVar{ + {Name: "anyEnvName1", Value: "anyEnvValue1"}, + {Name: "anyEnvName2", Value: "anyEnvValue2"}, + }, } ) @@ -46,8 +50,17 @@ func TestContainerDefinition(t *testing.T) { assert.Equal(t, cntrSpec.Name, *cntr.definition.Name, "incorrect name") assert.Equal(t, cntrSpec.Image, *cntr.definition.Image, "incorrect image") assert.Equal(t, cntrSpec.Command[0], *cntr.definition.EntryPoint[0], "incorrect command") - assert.Equal(t, cntrSpec.Args[0], *cntr.definition.Command[0], "incorrect args") + + for i, env := range cntrSpec.Args { + assert.Equal(t, env, *cntr.definition.Command[i], "incorrect args") + } + assert.Equal(t, cntrSpec.WorkingDir, *cntr.definition.WorkingDirectory, "incorrect working dir") + + for i, env := range cntrSpec.Env { + assert.Equal(t, env.Name, *cntr.definition.Environment[i].Name, "incorrect env name") + assert.Equal(t, env.Value, *cntr.definition.Environment[i].Value, "incorrect env value") + } } // TestContainerResourceRequirementsDefaults verifies whether the container gets default CPU diff --git a/providers/aws/provider_test.go b/providers/aws/provider_test.go index 79d10d90a..b38a9f0e6 100644 --- a/providers/aws/provider_test.go +++ b/providers/aws/provider_test.go @@ -237,7 +237,13 @@ func TestAWSFargateProviderPodLifecycle(t *testing.T) { 
"/bin/sh", }, Args: []string{ - "-c", "echo \"Started\"; while true; do sleep 1; done", + "-c", + "echo \"Started\";" + + "echo \"TEST_ENV=$TEST_ENV\";" + + "while true; do sleep 1; done", + }, + Env: []v1.EnvVar{ + {Name: "TEST_ENV", Value: "AnyValue"}, }, Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ @@ -281,8 +287,18 @@ func TestAWSFargateProviderPodLifecycle(t *testing.T) { t.Error(err) } - if logs != "Started\n" { - t.Errorf("Expected logs to be \"Started\\n\", but received \"%v\"", logs) + // Test log output. + receivedLogs := strings.Split(logs, "\n") + expectedLogs := []string{ + "Started", + pod.Spec.Containers[0].Env[0].Name + "=" + pod.Spec.Containers[0].Env[0].Value, + } + + for i, line := range receivedLogs { + fmt.Printf("Log[#%d]: %v\n", i, line) + if len(expectedLogs) > i && receivedLogs[i] != expectedLogs[i] { + t.Errorf("Expected log line %d to be %q, but received %q", i, line, receivedLogs[i]) + } } // Delete the pod. From 7e7947cd3dfd946f2d0f1c92a8500190bfd6cbdb Mon Sep 17 00:00:00 2001 From: Deep Kapur Date: Fri, 3 Aug 2018 15:13:36 -0700 Subject: [PATCH 21/21] fixing a minor typo (#290) * fixing a minor typo * Update README.md --- providers/sfmesh/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/providers/sfmesh/README.md b/providers/sfmesh/README.md index df6c3e699..2549dc2ed 100644 --- a/providers/sfmesh/README.md +++ b/providers/sfmesh/README.md @@ -1,12 +1,12 @@ # Kubernetes Virtual Kubelet with Service Fabric Mesh -[Service Fabric Mesh](https://docs.microsoft.com/en-us/azure/service-fabric-mesh/service-fabric-mesh-overview) is a fully managed service that enables developers to deploy microservices applications without managing virtual machines, storage, or networking. Applications hosted on Service Fabric Mesh run and scale without you worrying about the infrastructure powering it. 
+[Service Fabric Mesh](https://docs.microsoft.com/en-us/azure/service-fabric-mesh/service-fabric-mesh-overview) is a fully managed service that enables developers to deploy microservices applications without managing virtual machines, storage, or networking. Applications hosted on Service Fabric Mesh run and scale without you worrying about the infrastructure powering them. The Virtual kubelet integration allows you to use the Kubernetes API to burst out compute to a Service Fabric Mesh cluster and schedule pods as Mesh Applications. ## Status: Experimental -This provider is currently in the exterimental stages. Contributions welcome! +This provider is currently in the experimental stages. Contributions are welcome! ## Setup