From 0543245668b19976ef75c88b040444bbd28a1a37 Mon Sep 17 00:00:00 2001 From: Brian Goff Date: Tue, 18 May 2021 21:33:09 +0000 Subject: [PATCH] lifecycle test: timeout send goroutine on context In error cases these goroutines never exit. Trying to debug cases we end up with a bunch of these goroutines stuck making it difficult to troubleshoot. We could just make a buffered channel, however, this would make it less clear, in cases of an error, what all is happening. --- node/lifecycle_test.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/node/lifecycle_test.go b/node/lifecycle_test.go index 10e187597..506eb016a 100644 --- a/node/lifecycle_test.go +++ b/node/lifecycle_test.go @@ -367,6 +367,14 @@ func testDanglingPodScenario(ctx context.Context, t *testing.T, s *system, m tes } +func sendErr(ctx context.Context, ch chan error, err error) { + select { + case <-ctx.Done(): + log.G(ctx).WithError(err).Warn("timeout waiting to send test error") + case ch <- err: + } +} + func testDanglingPodScenarioWithDeletionTimestamp(ctx context.Context, t *testing.T, s *system) { t.Parallel() @@ -396,7 +404,7 @@ func testDanglingPodScenarioWithDeletionTimestamp(ctx context.Context, t *testin func(ev watch.Event) (bool, error) { return ev.Type == watch.Deleted, nil }) - watchErrCh <- watchErr + sendErr(ctx, watchErrCh, watchErr) }() // Start the pod controller @@ -436,7 +444,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system, return pod.Name == p.ObjectMeta.Name, nil }) - watchErrCh <- watchErr + sendErr(ctx, watchErrCh, watchErr) }() // Create the Pod @@ -465,7 +473,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system, return pod.Status.Phase == corev1.PodRunning, nil }) - watchErrCh <- watchErr + sendErr(ctx, watchErrCh, watchErr) }() assert.NilError(t, s.start(ctx)) @@ -487,7 +495,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s 
*system, _, watchDeleteErr := watchutils.UntilWithoutRetry(ctx, watcher2, func(ev watch.Event) (bool, error) { return ev.Type == watch.Deleted, nil }) - waitDeleteCh <- watchDeleteErr + sendErr(ctx, waitDeleteCh, watchDeleteErr) }() // Setup a watch prior to pod deletion @@ -495,7 +503,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system, assert.NilError(t, err) defer watcher.Stop() go func() { - watchErrCh <- waitFunction(ctx, watcher) + sendErr(ctx, watchErrCh, waitFunction(ctx, watcher)) }() // Delete the pod via deletiontimestamp @@ -559,7 +567,7 @@ func testUpdatePodWhileRunningScenario(ctx context.Context, t *testing.T, s *sys }) // This deepcopy is required to please the race detector p = newPod.Object.(*corev1.Pod).DeepCopy() - watchErrCh <- watchErr + sendErr(ctx, watchErrCh, watchErr) }() // Start the pod controller