use shared informers and workqueue (#425)
* vendor: add vendored code
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* controller: use shared informers and a work queue
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* errors: use cpuguy83/strongerrors
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* aci: fix test that uses resource manager
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* readme: clarify skaffold run before e2e
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* cmd: use root context everywhere
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: refactor pod lifecycle management
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* e2e: fix race in test when observing deletions
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* e2e: test pod forced deletion
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* cmd: fix root context potential leak
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: rename metaKey
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: remove calls to HandleError
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* Revert "errors: use cpuguy83/strongerrors"
This reverts commit f031fc6d.
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* manager: remove redundant lister constraint
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: rename the pod event recorder
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: amend misleading comment
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* mock: add tracing
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: add tracing
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* test: observe timeouts
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* trace: remove unnecessary comments
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: limit concurrency in deleteDanglingPods
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: never store context, always pass in calls
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: remove HandleCrash and just panic
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: don't sync succeeded pods
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: ensure pod deletion from kubernetes
Signed-off-by: Paulo Pires <pjpires@gmail.com>
Committed by: Robbie Zhang
Parent: 0e9cfca585
Commit: 28a757f4da
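The headline change ("controller: use shared informers and a work queue") replaces per-controller watch plumbing with a shared informer feeding a rate-limited work queue. The sketch below shows the general client-go pattern only, not the PR's actual wiring; the resync interval, function names, and the syncPod callback are illustrative assumptions.

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// newPodQueue wires a shared pod informer to a rate-limited work queue.
// Every event is reduced to a namespace/name key, so a pod that changes
// several times while its key sits in the queue is synced only once.
func newPodQueue(client kubernetes.Interface) (cache.SharedIndexInformer, workqueue.RateLimitingInterface) {
	factory := informers.NewSharedInformerFactory(client, 30*time.Second) // resync interval is an assumption
	informer := factory.Core().V1().Pods().Informer()
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	enqueue := func(obj interface{}) {
		// DeletionHandlingMetaNamespaceKeyFunc also unwraps the tombstones
		// delivered when the informer missed the actual delete event.
		if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
			queue.AddRateLimited(key)
		}
	}
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		UpdateFunc: func(_, newObj interface{}) { enqueue(newObj) },
		DeleteFunc: enqueue,
	})
	return informer, queue
}

// processNextWorkItem drains one key from the queue; syncPod is a
// hypothetical reconcile callback supplied by the controller.
func processNextWorkItem(queue workqueue.RateLimitingInterface, syncPod func(key string) error) bool {
	key, quit := queue.Get()
	if quit {
		return false
	}
	defer queue.Done(key)
	if err := syncPod(key.(string)); err != nil {
		// Requeue with backoff so transient provider errors are retried.
		queue.AddRateLimited(key)
		return true
	}
	// Clear the rate-limiting history on success.
	queue.Forget(key)
	return true
}

The queue decouples event delivery from processing: informer callbacks return immediately, while AddRateLimited gives failed syncs exponential backoff instead of a hot retry loop.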
@@ -5,12 +5,18 @@ package e2e
 import (
 	"fmt"
 	"testing"
 	"time"
 
 	"k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 )
 
+const (
+	// deleteGracePeriodForProvider is the amount of time we allow for the provider to react to deletion of a pod before proceeding to assert that the pod has been deleted.
+	deleteGracePeriodForProvider = 100 * time.Millisecond
+)
+
 // TestGetStatsSummary creates a pod having two containers and queries the /stats/summary endpoint of the virtual-kubelet.
 // It expects this endpoint to return stats for the current node, as well as for the aforementioned pod and each of its two containers.
 func TestGetStatsSummary(t *testing.T) {
@@ -110,15 +116,30 @@ func TestPodLifecycle(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	// Wait for the "nginx-1-Y" pod to be deleted in a separate goroutine.
+	// This ensures that we don't possibly miss the MODIFIED/DELETED events due to establishing the watch too late in the process.
+	pod1Ch := make(chan error)
+	go func() {
+		// Wait for the "nginx-1-Y" pod to be reported as having been marked for deletion.
+		if err := f.WaitUntilPodDeleted(pod1.Namespace, pod1.Name); err != nil {
+			// Propagate the error to the outside so we can fail the test.
+			pod1Ch <- err
+		} else {
+			// Close the pod1Ch channel, signaling we've observed deletion of the pod.
+			close(pod1Ch)
+		}
+	}()
+
 	// Delete the "nginx-1" pod.
 	if err := f.DeletePod(pod1.Namespace, pod1.Name); err != nil {
 		t.Fatal(err)
 	}
-
-	// Wait for the "nginx-1-Y" pod to be reported as having been marked for deletion.
-	if err := f.WaitUntilPodDeleted(pod1.Namespace, pod1.Name); err != nil {
+	// Wait for the delete event to be ACKed.
+	if err := <-pod1Ch; err != nil {
 		t.Fatal(err)
 	}
+	// Give the provider some time to react to the MODIFIED/DELETED events before proceeding.
+	time.Sleep(deleteGracePeriodForProvider)
 
 	// Grab the stats from the provider.
 	stats, err = f.GetStatsSummary()
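This hunk establishes the watch before issuing the delete, so the DELETED event cannot slip past between the two calls. The helper below is a plausible sketch of how a WaitUntilPodDeleted-style function can be built on client-go's watch tools; it is an assumption, not the framework's actual implementation, which may differ (for example by reusing WaitUntilPodCondition).

package framework

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitUntilPodDeleted blocks until a DELETED event is observed for the named
// pod, or until ctx is cancelled.
func waitUntilPodDeleted(ctx context.Context, client kubernetes.Interface, namespace, name string) error {
	fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector
			return client.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}
	// UntilWithSync lists first (so a pod deleted before the watch is
	// established is not waited on forever under a stale view), then
	// watches until the condition function returns true.
	_, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, func(event watch.Event) (bool, error) {
		return event.Type == watch.Deleted, nil
	})
	return err
}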
@@ -130,6 +151,42 @@ func TestPodLifecycle(t *testing.T) {
 	if _, err := findPodInPodStats(stats, pod1); err == nil {
 		t.Fatalf("expected to NOT find pod \"%s/%s\" in the slice of pod stats", pod1.Namespace, pod1.Name)
 	}
+
+	// Wait for the "nginx-0-X" pod to be deleted in a separate goroutine.
+	// This ensures that we don't possibly miss the MODIFIED/DELETED events due to establishing the watch too late in the process.
+	pod0Ch := make(chan error)
+	go func() {
+		// Wait for the "nginx-0-X" pod to be reported as having been deleted.
+		if err := f.WaitUntilPodDeleted(pod0.Namespace, pod0.Name); err != nil {
+			// Propagate the error to the outside so we can fail the test.
+			pod0Ch <- err
+		} else {
+			// Close the pod0Ch channel, signaling we've observed deletion of the pod.
+			close(pod0Ch)
+		}
+	}()
+
+	// Forcibly delete the "nginx-0" pod.
+	if err := f.DeletePodImmediately(pod0.Namespace, pod0.Name); err != nil {
+		t.Fatal(err)
+	}
+	// Wait for the delete event to be ACKed.
+	if err := <-pod0Ch; err != nil {
+		t.Fatal(err)
+	}
+	// Give the provider some time to react to the MODIFIED/DELETED events before proceeding.
+	time.Sleep(deleteGracePeriodForProvider)
+
+	// Grab the stats from the provider.
+	stats, err = f.GetStatsSummary()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Make sure the "nginx-0-X" pod DOES NOT exist in the slice of PodStats anymore.
+	if _, err := findPodInPodStats(stats, pod0); err == nil {
+		t.Fatalf("expected to NOT find pod \"%s/%s\" in the slice of pod stats", pod0.Namespace, pod0.Name)
+	}
 }
 
 // findPodInPodStats returns the index of the specified pod in the .pods field of the specified Summary object.
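findPodInPodStats is referenced but not shown in this hunk. A minimal sketch consistent with its doc comment and the stats/v1alpha1 types might look like the following; the repository's actual implementation may differ (for instance by also matching the pod UID).

package e2e

import (
	"fmt"

	"k8s.io/api/core/v1"
	stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
)

// findPodInPodStats returns the index of the given pod in summary.Pods,
// or an error if the pod is absent from the slice of pod stats.
func findPodInPodStats(summary *stats.Summary, pod *v1.Pod) (int, error) {
	for i, p := range summary.Pods {
		if p.PodRef.Namespace == pod.Namespace && p.PodRef.Name == pod.Name {
			return i, nil
		}
	}
	return -1, fmt.Errorf("failed to find pod \"%s/%s\" in the slice of pod stats", pod.Namespace, pod.Name)
}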
@@ -57,11 +57,22 @@ func (f *Framework) CreatePod(pod *corev1.Pod) (*corev1.Pod, error) {
 	return f.KubeClient.CoreV1().Pods(f.Namespace).Create(pod)
 }
 
-// DeletePod deletes the pod with the specified name and namespace in the Kubernetes API.
+// DeletePod deletes the pod with the specified name and namespace in the Kubernetes API using the default grace period.
 func (f *Framework) DeletePod(namespace, name string) error {
 	return f.KubeClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{})
 }
 
+// DeletePodImmediately forcibly deletes the pod with the specified name and namespace in the Kubernetes API.
+// This is equivalent to running "kubectl delete --force --grace-period 0 --namespace <namespace> pod <name>".
+func (f *Framework) DeletePodImmediately(namespace, name string) error {
+	grace := int64(0)
+	propagation := metav1.DeletePropagationBackground
+	return f.KubeClient.CoreV1().Pods(namespace).Delete(name, &metav1.DeleteOptions{
+		GracePeriodSeconds: &grace,
+		PropagationPolicy:  &propagation,
+	})
+}
+
 // WaitUntilPodCondition establishes a watch on the pod with the specified name and namespace.
 // Then, it waits for the specified condition function to be verified.
 func (f *Framework) WaitUntilPodCondition(namespace, name string, fn watch.ConditionFunc) error {
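Two of the commits above deserve a closing illustration: "sync: limit concurrency in deleteDanglingPods" and "sync: never store context, always pass in calls". A common Go pattern that satisfies both, bounding fan-out with a buffered-channel semaphore while threading the context through every call, is sketched below; the function name, signature, and worker limit are assumptions, not the PR's code.

package main

import (
	"context"
	"log"
	"sync"

	corev1 "k8s.io/api/core/v1"
)

// deleteDanglingPodsBounded deletes each pod with at most maxWorkers
// deletions in flight, using a buffered channel as a counting semaphore.
// The context is passed into every call rather than stored on a struct.
func deleteDanglingPodsBounded(ctx context.Context, pods []*corev1.Pod, maxWorkers int, deleteOne func(context.Context, *corev1.Pod) error) {
	sem := make(chan struct{}, maxWorkers)
	var wg sync.WaitGroup
	for _, pod := range pods {
		pod := pod // capture the loop variable (needed before Go 1.22)
		wg.Add(1)
		sem <- struct{}{} // blocks while maxWorkers deletions are already running
		go func() {
			defer wg.Done()
			defer func() { <-sem }()
			if err := deleteOne(ctx, pod); err != nil {
				log.Printf("failed to delete dangling pod %s/%s: %v", pod.Namespace, pod.Name, err)
			}
		}()
	}
	wg.Wait()
}

Acquiring the semaphore before spawning the goroutine keeps the number of live goroutines itself bounded, instead of launching one per pod and parking most of them on the channel.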