Move test/* to internal/test/*
This ensures that this code cannot be imported by other repositories, as it is intended only for internal testing.
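For context, Go enforces this rule at compile time: a package under a directory named internal can only be imported by code rooted at that directory's parent. A minimal sketch of the effect (the new import path is assumed from the move described above):

// Inside the virtual-kubelet module, this import still compiles after the move:
import _ "github.com/virtual-kubelet/virtual-kubelet/internal/test/e2e/framework"

// From any other module, the same import is rejected at compile time with an
// error along the lines of:
//   use of internal package .../internal/test/e2e/framework not allowed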
@@ -1,364 +0,0 @@
// +build e2e

package e2e

import (
    "fmt"
    "testing"
    "time"

    "github.com/virtual-kubelet/virtual-kubelet/vkubelet"
    "gotest.tools/assert"
    "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
)

const (
    // deleteGracePeriodForProvider is the maximum amount of time we allow for the provider to react to deletion of a pod
    // before proceeding to assert that the pod has been deleted.
    deleteGracePeriodForProvider = 1 * time.Second
)

// TestGetStatsSummary creates a pod having three containers and queries the /stats/summary endpoint of the virtual-kubelet.
// It expects this endpoint to return stats for the current node, as well as for the aforementioned pod and each of its three containers.
func TestGetStatsSummary(t *testing.T) {
    // Create a pod with prefix "nginx-" having three containers.
    pod, err := f.CreatePod(f.CreateDummyPodObjectWithPrefix(t.Name(), "nginx-", "foo", "bar", "baz"))
    if err != nil {
        t.Fatal(err)
    }
    // Delete the "nginx-" pod after the test finishes.
    defer func() {
        if err := f.DeletePodImmediately(pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
            t.Error(err)
        }
    }()

    // Wait for the "nginx-" pod to be reported as running and ready.
    if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
        t.Fatal(err)
    }

    // Grab the stats from the provider.
    stats, err := f.GetStatsSummary()
    if err != nil {
        t.Fatal(err)
    }

    // Make sure that we've got stats for the current node.
    if stats.Node.NodeName != f.NodeName {
        t.Fatalf("expected stats for node %s, got stats for node %s", f.NodeName, stats.Node.NodeName)
    }

    // Make sure the "nginx-" pod exists in the slice of PodStats.
    idx, err := findPodInPodStats(stats, pod)
    if err != nil {
        t.Fatal(err)
    }

    // Make sure that we've got stats for all the containers in the "nginx-" pod.
    desiredContainerStatsCount := len(pod.Spec.Containers)
    currentContainerStatsCount := len(stats.Pods[idx].Containers)
    if currentContainerStatsCount != desiredContainerStatsCount {
        t.Fatalf("expected stats for %d containers, got stats for %d containers", desiredContainerStatsCount, currentContainerStatsCount)
    }
}

// TestPodLifecycleGracefulDelete creates a pod and verifies that the provider has been asked to create it.
// Then, it deletes the pod and verifies that the provider has been asked to delete it.
// These verifications are made using the /stats/summary endpoint of the virtual-kubelet, by checking for the presence or absence of the pod.
// Hence, the provider being tested must implement the PodMetricsProvider interface.
func TestPodLifecycleGracefulDelete(t *testing.T) {
    // Create a pod with prefix "nginx-" having a single container.
    podSpec := f.CreateDummyPodObjectWithPrefix(t.Name(), "nginx-", "foo")
    podSpec.Spec.NodeName = nodeName

    pod, err := f.CreatePod(podSpec)
    if err != nil {
        t.Fatal(err)
    }
    // Delete the pod after the test finishes.
    defer func() {
        if err := f.DeletePodImmediately(pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
            t.Error(err)
        }
    }()
    t.Logf("Created pod: %s", pod.Name)

    // Wait for the "nginx-" pod to be reported as running and ready.
    if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
        t.Fatal(err)
    }
    t.Logf("Pod %s ready", pod.Name)

    // Grab the pods from the provider.
    pods, err := f.GetRunningPods()
    assert.NilError(t, err)

    // Check that the pod exists in the provider's list of running pods.
    assert.NilError(t, findPodInPods(pods, pod))

    podCh := make(chan error)
    var podLast *v1.Pod
    go func() {
        // Close the podCh channel, signaling we've observed deletion of the pod.
        defer close(podCh)

        var err error
        podLast, err = f.WaitUntilPodDeleted(pod.Namespace, pod.Name)
        if err != nil {
            // Propagate the error to the outside so we can fail the test.
            podCh <- err
        }
    }()

    // Gracefully delete the "nginx-" pod.
    if err := f.DeletePod(pod.Namespace, pod.Name); err != nil {
        t.Fatal(err)
    }
    t.Logf("Deleted pod: %s", pod.Name)

    // Wait for the delete event to be ACKed.
    if err := <-podCh; err != nil {
        t.Fatal(err)
    }

    // Give the provider some time to react to the MODIFIED/DELETED events before proceeding.
    time.Sleep(deleteGracePeriodForProvider)

    // Grab the pods from the provider.
    pods, err = f.GetRunningPods()
    assert.NilError(t, err)

    // Make sure the pod DOES NOT exist in the provider's set of running pods.
    assert.Assert(t, findPodInPods(pods, pod) != nil)

    // Make sure we saw the delete event, and that the delete was graceful.
    assert.Assert(t, podLast != nil)
    assert.Assert(t, podLast.ObjectMeta.GetDeletionGracePeriodSeconds() != nil)
    assert.Assert(t, *podLast.ObjectMeta.GetDeletionGracePeriodSeconds() > 0)
}

// TestPodLifecycleForceDelete creates one pod and verifies that the provider has created it
// and put it in the running lifecycle. It then does a force delete on the pod, and verifies that the provider
// has deleted it.
func TestPodLifecycleForceDelete(t *testing.T) {
    // Create a pod with prefix "nginx-" having a single container.
    podSpec := f.CreateDummyPodObjectWithPrefix(t.Name(), "nginx-", "foo")
    pod, err := f.CreatePod(podSpec)
    if err != nil {
        t.Fatal(err)
    }
    // Delete the pod after the test finishes.
    defer func() {
        if err := f.DeletePodImmediately(pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
            t.Error(err)
        }
    }()
    t.Logf("Created pod: %s", pod.Name)

    // Wait for the "nginx-" pod to be reported as running and ready.
    if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
        t.Fatal(err)
    }
    t.Logf("Pod %s ready", pod.Name)

    // Grab the pods from the provider.
    pods, err := f.GetRunningPods()
    assert.NilError(t, err)

    // Check that the pod exists in the provider's list of running pods.
    assert.NilError(t, findPodInPods(pods, pod))

    // Wait for the pod to be deleted in a separate goroutine.
    // This ensures that we don't possibly miss the MODIFIED/DELETED events due to establishing the watch too late in the process.
    // It also makes sure that, in light of soft deletes, we properly handle non-graceful pod deletion.
    podCh := make(chan error)
    var podLast *v1.Pod
    go func() {
        // Close the podCh channel, signaling we've observed deletion of the pod.
        defer close(podCh)

        var err error
        // Wait for the pod to be reported as having been deleted.
        podLast, err = f.WaitUntilPodDeleted(pod.Namespace, pod.Name)
        if err != nil {
            // Propagate the error to the outside so we can fail the test.
            podCh <- err
        }
    }()

    time.Sleep(deleteGracePeriodForProvider)
    // Forcibly delete the pod.
    if err := f.DeletePodImmediately(pod.Namespace, pod.Name); err != nil {
        t.Logf("Last saw pod in state: %+v", podLast)
        t.Fatal(err)
    }
    t.Log("Force deleted pod: ", pod.Name)

    // Wait for the delete event to be ACKed.
    if err := <-podCh; err != nil {
        t.Logf("Last saw pod in state: %+v", podLast)
        t.Fatal(err)
    }
    // Give the provider some time to react to the MODIFIED/DELETED events before proceeding.
    time.Sleep(deleteGracePeriodForProvider)

    // Grab the pods from the provider.
    pods, err = f.GetRunningPods()
    assert.NilError(t, err)

    // Make sure the "nginx-" pod DOES NOT exist in the provider's list of running pods anymore.
    assert.Assert(t, findPodInPods(pods, pod) != nil)

    t.Logf("Pod ended as phase: %+v", podLast.Status.Phase)
}

// TestCreatePodWithOptionalInexistentSecrets tries to create a pod referencing optional, inexistent secrets.
// It then verifies that the pod is created successfully.
func TestCreatePodWithOptionalInexistentSecrets(t *testing.T) {
    // Create a pod with a single container referencing optional, inexistent secrets.
    pod, err := f.CreatePod(f.CreatePodObjectWithOptionalSecretKey(t.Name()))
    if err != nil {
        t.Fatal(err)
    }

    // Delete the pod after the test finishes.
    defer func() {
        if err := f.DeletePodImmediately(pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
            t.Error(err)
        }
    }()

    // Wait for the pod to be reported as running and ready.
    if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
        t.Fatal(err)
    }

    // Wait for an event concerning the missing secret to be reported on the pod.
    if err := f.WaitUntilPodEventWithReason(pod, vkubelet.ReasonOptionalSecretNotFound); err != nil {
        t.Fatal(err)
    }

    // Grab the pods from the provider.
    pods, err := f.GetRunningPods()
    assert.NilError(t, err)

    // Check that the pod exists in the provider's list of running pods.
    assert.NilError(t, findPodInPods(pods, pod))
}

// TestCreatePodWithMandatoryInexistentSecrets tries to create a pod referencing mandatory, inexistent secrets.
// It then verifies that the pod is not created.
func TestCreatePodWithMandatoryInexistentSecrets(t *testing.T) {
    // Create a pod with a single container referencing mandatory, inexistent secrets.
    pod, err := f.CreatePod(f.CreatePodObjectWithMandatorySecretKey(t.Name()))
    if err != nil {
        t.Fatal(err)
    }

    // Delete the pod after the test finishes.
    defer func() {
        if err := f.DeletePodImmediately(pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
            t.Error(err)
        }
    }()

    // Wait for an event concerning the missing secret to be reported on the pod.
    if err := f.WaitUntilPodEventWithReason(pod, vkubelet.ReasonMandatorySecretNotFound); err != nil {
        t.Fatal(err)
    }

    // Grab the pods from the provider.
    pods, err := f.GetRunningPods()
    assert.NilError(t, err)

    // Make sure the pod DOES NOT exist in the provider's list of running pods.
    assert.Assert(t, findPodInPods(pods, pod) != nil)
}

// TestCreatePodWithOptionalInexistentConfigMap tries to create a pod referencing an optional, inexistent config map.
// It then verifies that the pod is created successfully.
func TestCreatePodWithOptionalInexistentConfigMap(t *testing.T) {
    // Create a pod with a single container referencing an optional, inexistent config map.
    pod, err := f.CreatePod(f.CreatePodObjectWithOptionalConfigMapKey(t.Name()))
    if err != nil {
        t.Fatal(err)
    }

    // Delete the pod after the test finishes.
    defer func() {
        if err := f.DeletePodImmediately(pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
            t.Error(err)
        }
    }()

    // Wait for the pod to be reported as running and ready.
    if _, err := f.WaitUntilPodReady(pod.Namespace, pod.Name); err != nil {
        t.Fatal(err)
    }

    // Wait for an event concerning the missing config map to be reported on the pod.
    if err := f.WaitUntilPodEventWithReason(pod, vkubelet.ReasonOptionalConfigMapNotFound); err != nil {
        t.Fatal(err)
    }

    // Grab the pods from the provider.
    pods, err := f.GetRunningPods()
    assert.NilError(t, err)

    // Check that the pod exists in the provider's list of running pods.
    assert.NilError(t, findPodInPods(pods, pod))
}

// TestCreatePodWithMandatoryInexistentConfigMap tries to create a pod referencing a mandatory, inexistent config map.
// It then verifies that the pod is not created.
func TestCreatePodWithMandatoryInexistentConfigMap(t *testing.T) {
    // Create a pod with a single container referencing a mandatory, inexistent config map.
    pod, err := f.CreatePod(f.CreatePodObjectWithMandatoryConfigMapKey(t.Name()))
    if err != nil {
        t.Fatal(err)
    }

    // Delete the pod after the test finishes.
    defer func() {
        if err := f.DeletePodImmediately(pod.Namespace, pod.Name); err != nil && !apierrors.IsNotFound(err) {
            t.Error(err)
        }
    }()

    // Wait for an event concerning the missing config map to be reported on the pod.
    if err := f.WaitUntilPodEventWithReason(pod, vkubelet.ReasonMandatoryConfigMapNotFound); err != nil {
        t.Fatal(err)
    }

    // Grab the pods from the provider.
    pods, err := f.GetRunningPods()
    assert.NilError(t, err)

    // Make sure the pod DOES NOT exist in the provider's list of running pods.
    assert.Assert(t, findPodInPods(pods, pod) != nil)
}

// findPodInPodStats returns the index of the specified pod in the .pods field of the specified Summary object.
// It returns an error if the specified pod is not found.
func findPodInPodStats(summary *v1alpha1.Summary, pod *v1.Pod) (int, error) {
    for i, p := range summary.Pods {
        if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name && string(p.PodRef.UID) == string(pod.UID) {
            return i, nil
        }
    }
    return -1, fmt.Errorf("failed to find pod \"%s/%s\" in the slice of pod stats", pod.Namespace, pod.Name)
}

// findPodInPods checks for the presence of the specified pod in the .items field of the specified PodList object.
// It returns an error if the pod doesn't exist in the pod list.
func findPodInPods(pods *v1.PodList, pod *v1.Pod) error {
    for _, p := range pods.Items {
        if p.Namespace == pod.Namespace && p.Name == pod.Name && string(p.UID) == string(pod.UID) {
            return nil
        }
    }
    return fmt.Errorf("failed to find pod \"%s/%s\" in the pod list", pod.Namespace, pod.Name)
}
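For reference, the PodMetricsProvider interface mentioned in the doc comment of TestPodLifecycleGracefulDelete is, to the best of my knowledge, a single-method interface along these lines for this era of the codebase (an assumed sketch, not part of this diff; "context" is the standard library package and "stats" the kubelet stats v1alpha1 package imported above):

// PodMetricsProvider is implemented by providers that can serve
// kubelet-style pod stats through the virtual-kubelet.
type PodMetricsProvider interface {
    GetStatsSummary(ctx context.Context) (*stats.Summary, error)
}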
@@ -1,81 +0,0 @@
package framework

import (
    corev1 "k8s.io/api/core/v1"
)

var (
    bFalse = false
    bTrue  = true
)

// CreatePodObjectWithMandatoryConfigMapKey creates a pod object that references the "key_0" key from the "config-map-0" config map as mandatory.
func (f *Framework) CreatePodObjectWithMandatoryConfigMapKey(testName string) *corev1.Pod {
    return f.CreatePodObjectWithEnv(testName, []corev1.EnvVar{
        {
            Name: "CONFIG_MAP_0_KEY_0",
            ValueFrom: &corev1.EnvVarSource{
                ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{Name: "config-map-0"},
                    Key:                  "key_0",
                    Optional:             &bFalse,
                },
            },
        },
    })
}

// CreatePodObjectWithOptionalConfigMapKey creates a pod object that references the "key_0" key from the "config-map-0" config map as optional.
func (f *Framework) CreatePodObjectWithOptionalConfigMapKey(testName string) *corev1.Pod {
    return f.CreatePodObjectWithEnv(testName, []corev1.EnvVar{
        {
            Name: "CONFIG_MAP_0_KEY_0",
            ValueFrom: &corev1.EnvVarSource{
                ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{Name: "config-map-0"},
                    Key:                  "key_0",
                    Optional:             &bTrue,
                },
            },
        },
    })
}

// CreatePodObjectWithMandatorySecretKey creates a pod object that references the "key_0" key from the "secret-0" secret as mandatory.
func (f *Framework) CreatePodObjectWithMandatorySecretKey(testName string) *corev1.Pod {
    return f.CreatePodObjectWithEnv(testName, []corev1.EnvVar{
        {
            Name: "SECRET_0_KEY_0",
            ValueFrom: &corev1.EnvVarSource{
                SecretKeyRef: &corev1.SecretKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{Name: "secret-0"},
                    Key:                  "key_0",
                    Optional:             &bFalse,
                },
            },
        },
    })
}

// CreatePodObjectWithOptionalSecretKey creates a pod object that references the "key_0" key from the "secret-0" secret as optional.
func (f *Framework) CreatePodObjectWithOptionalSecretKey(testName string) *corev1.Pod {
    return f.CreatePodObjectWithEnv(testName, []corev1.EnvVar{
        {
            Name: "SECRET_0_KEY_0",
            ValueFrom: &corev1.EnvVarSource{
                SecretKeyRef: &corev1.SecretKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{Name: "secret-0"},
                    Key:                  "key_0",
                    Optional:             &bTrue,
                },
            },
        },
    })
}

// CreatePodObjectWithEnv creates a pod object whose name starts with "env-test-" and that uses the specified environment configuration for its first container.
func (f *Framework) CreatePodObjectWithEnv(testName string, env []corev1.EnvVar) *corev1.Pod {
    pod := f.CreateDummyPodObjectWithPrefix(testName, "env-test-", "foo")
    pod.Spec.Containers[0].Env = env
    return pod
}
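As a usage sketch only (a hypothetical test, not part of this diff), these helpers compose with the pod helpers to exercise arbitrary environment configurations; "f" is the package-level Framework instance used throughout the e2e suite:

func TestCustomEnv(t *testing.T) {
    // Build a pod object with a literal environment variable on its first container.
    pod := f.CreatePodObjectWithEnv(t.Name(), []corev1.EnvVar{
        {Name: "GREETING", Value: "hello"},
    })
    // Create it through the Kubernetes API.
    created, err := f.CreatePod(pod)
    if err != nil {
        t.Fatal(err)
    }
    // Clean up regardless of the test outcome.
    defer f.DeletePodImmediately(created.Namespace, created.Name)
}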
@@ -1,41 +0,0 @@
package framework

import (
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
)

// Framework encapsulates the configuration for the current run, and provides helper methods to be used during testing.
type Framework struct {
    KubeClient kubernetes.Interface
    Namespace  string
    NodeName   string
}

// NewTestingFramework returns a new instance of the testing framework.
func NewTestingFramework(kubeconfig, namespace, nodeName string) *Framework {
    return &Framework{
        KubeClient: createKubeClient(kubeconfig),
        Namespace:  namespace,
        NodeName:   nodeName,
    }
}

// createKubeClient creates a new Kubernetes client based on the specified kubeconfig file.
// If no value for kubeconfig is specified, in-cluster configuration is assumed.
func createKubeClient(kubeconfig string) *kubernetes.Clientset {
    var (
        cfg *rest.Config
        err error
    )
    if kubeconfig == "" {
        cfg, err = rest.InClusterConfig()
    } else {
        cfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
    }
    if err != nil {
        panic(err)
    }
    return kubernetes.NewForConfigOrDie(cfg)
}
@@ -1,60 +0,0 @@
package framework

import (
    "context"
    "fmt"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/runtime"
    watchapi "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/watch"
)

// WaitUntilNodeCondition establishes a watch on the vk node.
// Then, it waits for the specified condition function to be verified.
func (f *Framework) WaitUntilNodeCondition(fn watch.ConditionFunc) error {
    // Create a field selector that matches the specified Node resource.
    fs := fields.OneTermEqualSelector("metadata.name", f.NodeName).String()
    // Create a ListWatch so we can receive events for the matched Node resource.
    lw := &cache.ListWatch{
        ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
            options.FieldSelector = fs
            return f.KubeClient.CoreV1().Nodes().List(options)
        },
        WatchFunc: func(options metav1.ListOptions) (watchapi.Interface, error) {
            options.FieldSelector = fs
            return f.KubeClient.CoreV1().Nodes().Watch(options)
        },
    }

    // Watch for updates to the Node resource until fn is satisfied, or until the timeout is reached.
    ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
    defer cancel()
    last, err := watch.UntilWithSync(ctx, lw, &corev1.Node{}, nil, fn)
    if err != nil {
        return err
    }
    if last == nil {
        return fmt.Errorf("no events received for node %q", f.NodeName)
    }
    return nil
}

// DeleteNode deletes the vk node used by the framework.
func (f *Framework) DeleteNode() error {
    var gracePeriod int64
    propagation := metav1.DeletePropagationBackground
    opts := metav1.DeleteOptions{
        PropagationPolicy:  &propagation,
        GracePeriodSeconds: &gracePeriod,
    }
    return f.KubeClient.CoreV1().Nodes().Delete(f.NodeName, &opts)
}

// GetNode gets the vk node used by the framework.
func (f *Framework) GetNode() (*corev1.Node, error) {
    return f.KubeClient.CoreV1().Nodes().Get(f.NodeName, metav1.GetOptions{})
}
@@ -1,186 +0,0 @@
package framework

import (
    "context"
    "fmt"
    "strings"
    "time"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/runtime"
    watchapi "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/watch"
    podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

const defaultWatchTimeout = 2 * time.Minute

// CreateDummyPodObjectWithPrefix creates a dummy pod object using the specified prefix as the value of .metadata.generateName.
// A variable number of strings can be provided.
// For each one of these strings, a container that uses the string as its image will be appended to the pod.
// This method DOES NOT create the pod in the Kubernetes API.
func (f *Framework) CreateDummyPodObjectWithPrefix(testName string, prefix string, images ...string) *corev1.Pod {
    // Sanitize the test name so it can be used in .metadata.generateName.
    if testName != "" {
        testName = strings.Replace(testName, "/", "-", -1)
        testName = strings.ToLower(testName)
        prefix = prefix + "-" + testName + "-"
    }
    enableServiceLink := false

    pod := &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: prefix,
            Namespace:    f.Namespace,
        },
        Spec: corev1.PodSpec{
            NodeName:           f.NodeName,
            EnableServiceLinks: &enableServiceLink,
        },
    }
    for idx, img := range images {
        pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
            Name:  fmt.Sprintf("%s%d", prefix, idx),
            Image: img,
        })
    }
    return pod
}

// CreatePod creates the specified pod in the Kubernetes API.
func (f *Framework) CreatePod(pod *corev1.Pod) (*corev1.Pod, error) {
    return f.KubeClient.CoreV1().Pods(f.Namespace).Create(pod)
}

// DeletePod deletes the pod with the specified name and namespace in the Kubernetes API using the default grace period.
func (f *Framework) DeletePod(namespace, name string) error {
    return f.KubeClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{})
}

// DeletePodImmediately forcibly deletes the pod with the specified name and namespace in the Kubernetes API.
// This is equivalent to running "kubectl delete --force --grace-period 0 --namespace <namespace> pod <name>".
func (f *Framework) DeletePodImmediately(namespace, name string) error {
    grace := int64(0)
    propagation := metav1.DeletePropagationBackground
    return f.KubeClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{
        GracePeriodSeconds: &grace,
        PropagationPolicy:  &propagation,
    })
}

// WaitUntilPodCondition establishes a watch on the pod with the specified name and namespace.
// Then, it waits for the specified condition function to be verified.
func (f *Framework) WaitUntilPodCondition(namespace, name string, fn watch.ConditionFunc) (*corev1.Pod, error) {
    // Create a field selector that matches the specified Pod resource.
    fs := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.namespace==%s,metadata.name==%s", namespace, name))
    // Create a ListWatch so we can receive events for the matched Pod resource.
    lw := &cache.ListWatch{
        ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
            options.FieldSelector = fs.String()
            return f.KubeClient.CoreV1().Pods(namespace).List(options)
        },
        WatchFunc: func(options metav1.ListOptions) (watchapi.Interface, error) {
            options.FieldSelector = fs.String()
            return f.KubeClient.CoreV1().Pods(namespace).Watch(options)
        },
    }
    // Watch for updates to the Pod resource until fn is satisfied, or until the timeout is reached.
    ctx, cfn := context.WithTimeout(context.Background(), defaultWatchTimeout)
    defer cfn()
    last, err := watch.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, fn)
    if err != nil {
        return nil, err
    }
    if last == nil {
        return nil, fmt.Errorf("no events received for pod %q", name)
    }
    pod := last.Object.(*corev1.Pod)
    return pod, nil
}

// WaitUntilPodReady blocks until the pod with the specified name and namespace is reported to be running and ready.
func (f *Framework) WaitUntilPodReady(namespace, name string) (*corev1.Pod, error) {
    return f.WaitUntilPodCondition(namespace, name, func(event watchapi.Event) (bool, error) {
        pod := event.Object.(*corev1.Pod)
        return pod.Status.Phase == corev1.PodRunning && podutil.IsPodReady(pod) && pod.Status.PodIP != "", nil
    })
}

// WaitUntilPodDeleted blocks until the pod with the specified name and namespace is deleted from the apiserver.
func (f *Framework) WaitUntilPodDeleted(namespace, name string) (*corev1.Pod, error) {
    return f.WaitUntilPodCondition(namespace, name, func(event watchapi.Event) (bool, error) {
        pod := event.Object.(*corev1.Pod)
        return event.Type == watchapi.Deleted || pod.ObjectMeta.DeletionTimestamp != nil, nil
    })
}

// WaitUntilPodInPhase blocks until the pod with the specified name and namespace is in one of the specified phases.
func (f *Framework) WaitUntilPodInPhase(namespace, name string, phases ...corev1.PodPhase) (*corev1.Pod, error) {
    return f.WaitUntilPodCondition(namespace, name, func(event watchapi.Event) (bool, error) {
        pod := event.Object.(*corev1.Pod)
        for _, p := range phases {
            if pod.Status.Phase == p {
                return true, nil
            }
        }
        return false, nil
    })
}

// WaitUntilPodEventWithReason establishes a watch on events involving the specified pod.
// Then, it waits for an event with the specified reason to be created/updated.
func (f *Framework) WaitUntilPodEventWithReason(pod *corev1.Pod, reason string) error {
    // Create a field selector that matches Event resources involving the specified pod.
    fs := fields.ParseSelectorOrDie(fmt.Sprintf("involvedObject.kind==Pod,involvedObject.uid==%s", pod.UID))
    // Create a ListWatch so we can receive events for the matched Event resources.
    lw := &cache.ListWatch{
        ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
            options.FieldSelector = fs.String()
            return f.KubeClient.CoreV1().Events(pod.Namespace).List(options)
        },
        WatchFunc: func(options metav1.ListOptions) (watchapi.Interface, error) {
            options.FieldSelector = fs.String()
            return f.KubeClient.CoreV1().Events(pod.Namespace).Watch(options)
        },
    }
    // Watch for updates to the Event resources until the condition is satisfied, or until the timeout is reached.
    ctx, cfn := context.WithTimeout(context.Background(), defaultWatchTimeout)
    defer cfn()
    last, err := watch.UntilWithSync(ctx, lw, &corev1.Event{}, nil, func(event watchapi.Event) (b bool, e error) {
        switch event.Type {
        case watchapi.Error:
            fallthrough
        case watchapi.Deleted:
            return false, fmt.Errorf("got event of unexpected type %q", event.Type)
        default:
            return event.Object.(*corev1.Event).Reason == reason, nil
        }
    })
    if err != nil {
        return err
    }
    if last == nil {
        return fmt.Errorf("no events involving pod \"%s/%s\" have been seen", pod.Namespace, pod.Name)
    }
    return nil
}

// GetRunningPods gets the running pods from the provider of the virtual-kubelet.
func (f *Framework) GetRunningPods() (*corev1.PodList, error) {
    result := &corev1.PodList{}

    err := f.KubeClient.CoreV1().
        RESTClient().
        Get().
        Resource("nodes").
        Name(f.NodeName).
        SubResource("proxy").
        Suffix("runningpods/").
        Do().
        Into(result)

    return result, err
}
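For reference, the GetRunningPods request above should correspond to the following node proxy path on the API server, served by the virtual-kubelet's kubelet-compatible HTTP API (a reading of the RESTClient call, not something stated in this diff):

GET /api/v1/nodes/<node-name>/proxy/runningpods/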
@@ -1,31 +0,0 @@
package framework

import (
    "encoding/json"
    "strconv"

    "k8s.io/apimachinery/pkg/util/net"
    stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
)

// GetStatsSummary queries the /stats/summary endpoint of the virtual-kubelet and returns the Summary object obtained as a response.
func (f *Framework) GetStatsSummary() (*stats.Summary, error) {
    // Query the /stats/summary endpoint.
    b, err := f.KubeClient.CoreV1().
        RESTClient().
        Get().
        Namespace(f.Namespace).
        Resource("pods").
        SubResource("proxy").
        Name(net.JoinSchemeNamePort("http", f.NodeName, strconv.Itoa(10255))).
        Suffix("/stats/summary").DoRaw()
    if err != nil {
        return nil, err
    }
    // Unmarshal the response as a Summary object and return it.
    res := &stats.Summary{}
    if err := json.Unmarshal(b, res); err != nil {
        return nil, err
    }
    return res, nil
}
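Similarly, the proxied request above should resolve to a pod proxy URL of roughly the following shape, assuming the virtual-kubelet serves its kubelet-compatible stats API on port 10255 as the code implies (again an inferred reading, not something stated in this diff):

GET /api/v1/namespaces/<namespace>/pods/http:<node-name>:10255/proxy/stats/summary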
@@ -1,60 +0,0 @@
// +build e2e

package e2e

import (
    "flag"
    "os"
    "testing"

    v1 "k8s.io/api/core/v1"

    "github.com/virtual-kubelet/virtual-kubelet/test/e2e/framework"
)

const (
    defaultNamespace = v1.NamespaceDefault
    defaultNodeName  = "vkubelet-mock-0"
)

var (
    // f is the testing framework used for running the test suite.
    f *framework.Framework

    // kubeconfig is the path to the kubeconfig file to use when running the test suite outside a Kubernetes cluster.
    kubeconfig string
    // namespace is the name of the Kubernetes namespace to use for running the test suite (i.e. where to create pods).
    namespace string
    // nodeName is the name of the virtual-kubelet node to test.
    nodeName string
)

func init() {
    flag.StringVar(&kubeconfig, "kubeconfig", "", "path to the kubeconfig file to use when running the test suite outside a kubernetes cluster")
    flag.StringVar(&namespace, "namespace", defaultNamespace, "the name of the kubernetes namespace to use for running the test suite (i.e. where to create pods)")
    flag.StringVar(&nodeName, "node-name", defaultNodeName, "the name of the virtual-kubelet node to test")
    flag.Parse()
}

func TestMain(m *testing.M) {
    // Set sane defaults in case no values (or empty ones) have been provided.
    setDefaults()
    // Create a new instance of the test framework targeting the specified node.
    f = framework.NewTestingFramework(kubeconfig, namespace, nodeName)
    // Wait for the virtual-kubelet pod to be ready.
    if _, err := f.WaitUntilPodReady(namespace, nodeName); err != nil {
        panic(err)
    }
    // Run the test suite.
    os.Exit(m.Run())
}

// setDefaults sets sane defaults in case no values (or empty ones) have been provided.
func setDefaults() {
    if namespace == "" {
        namespace = defaultNamespace
    }
    if nodeName == "" {
        nodeName = defaultNodeName
    }
}
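Given the e2e build tag and the flags registered in init above, a typical invocation of the suite looks roughly like this (hypothetical values; the package path reflects the pre-move test/e2e location):

go test -tags e2e ./test/e2e -kubeconfig=$HOME/.kube/config -namespace=default -node-name=vkubelet-mock-0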
@@ -1,63 +0,0 @@
// +build e2e

package e2e

import (
    "context"
    "testing"
    "time"

    "gotest.tools/assert"
    is "gotest.tools/assert/cmp"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    watchapi "k8s.io/apimachinery/pkg/watch"
)

// TestNodeCreateAfterDelete makes sure that a node is automatically recreated
// if it is deleted while VK is running.
func TestNodeCreateAfterDelete(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    podList, err := f.KubeClient.CoreV1().Pods(f.Namespace).List(metav1.ListOptions{
        FieldSelector: fields.OneTermEqualSelector("spec.nodeName", f.NodeName).String(),
    })

    assert.NilError(t, err)
    assert.Assert(t, is.Len(podList.Items, 0), "Kubernetes does not allow node deletion with dependent objects (pods) in existence")

    chErr := make(chan error, 1)

    originalNode, err := f.GetNode()
    assert.NilError(t, err)

    ctx, cancel = context.WithTimeout(ctx, time.Minute)
    defer cancel()

    go func() {
        wait := func(e watchapi.Event) (bool, error) {
            // Our timeout has expired.
            err = ctx.Err()
            if err != nil {
                return true, err
            }
            if e.Type == watchapi.Deleted || e.Type == watchapi.Error {
                return false, nil
            }

            return originalNode.ObjectMeta.UID != e.Object.(*v1.Node).ObjectMeta.UID, nil
        }
        chErr <- f.WaitUntilNodeCondition(wait)
    }()

    assert.NilError(t, f.DeleteNode())

    select {
    case result := <-chErr:
        assert.NilError(t, result, "Did not observe new node object created after deletion")
    case <-ctx.Done():
        t.Fatal("Test timed out while waiting for node object to be deleted / recreated")
    }
}