Remove sync provider support
This removes the legacy sync provider interface. All new providers are expected to implement the async NotifyPods interface. The legacy sync interface complicates the deletion flow, and supporting mixed sync and async APIs blocks us from evolving functionality. This change collapses the NotifyPods interface into the PodLifecycleHandler interface.
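For context, below is a minimal sketch of what the collapsed interface could look like after this change. The method set mirrors the existing PodLifecycleHandler plus the NotifyPods hook; treat the exact names and signatures as assumptions for illustration, not the repository's final definition.

// Sketch only: assumed shape of the collapsed interface after folding
// NotifyPods into PodLifecycleHandler. Signatures may differ from the repo.
package provider

import (
	"context"

	corev1 "k8s.io/api/core/v1"
)

type PodLifecycleHandler interface {
	// CRUD-style pod methods that providers already implement today.
	CreatePod(ctx context.Context, pod *corev1.Pod) error
	UpdatePod(ctx context.Context, pod *corev1.Pod) error
	DeletePod(ctx context.Context, pod *corev1.Pod) error
	GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error)
	GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error)
	GetPods(ctx context.Context) ([]*corev1.Pod, error)

	// NotifyPods registers a callback through which the provider pushes pod
	// status changes asynchronously, replacing the legacy sync polling path.
	NotifyPods(ctx context.Context, cb func(*corev1.Pod))
}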
@@ -116,15 +116,6 @@ func TestPodLifecycle(t *testing.T) {
				testCreateStartDeleteScenario(ctx, t, s, isPodDeletedPermanentlyFunc)
			}))
		})

		if testing.Short() {
			return
		}
		t.Run("mockV0Provider", func(t *testing.T) {
			assert.NilError(t, wireUpSystem(ctx, newMockV0Provider(), func(ctx context.Context, s *system) {
				testCreateStartDeleteScenario(ctx, t, s, isPodDeletedPermanentlyFunc)
			}))
		})
	})

	// createStartDeleteScenarioWithDeletionErrorNotFound tests the flow if the pod was not found in the provider
@@ -160,20 +151,13 @@ func TestPodLifecycle(t *testing.T) {
		t.Run("mockProvider", func(t *testing.T) {
			mp := newMockProvider()
			assert.NilError(t, wireUpSystem(ctx, mp, func(ctx context.Context, s *system) {
				testDanglingPodScenario(ctx, t, s, mp.mockV0Provider)
				testDanglingPodScenario(ctx, t, s, mp)
			}))
		})

		if testing.Short() {
			return
		}

		t.Run("mockV0Provider", func(t *testing.T) {
			mp := newMockV0Provider()
			assert.NilError(t, wireUpSystem(ctx, mp, func(ctx context.Context, s *system) {
				testDanglingPodScenario(ctx, t, s, mp)
			}))
		})
	})

	// failedPodScenario ensures that the VK ignores failed pods that were failed prior to the PC starting up
@@ -184,16 +168,6 @@ func TestPodLifecycle(t *testing.T) {
				testFailedPodScenario(ctx, t, s)
			}))
		})

		if testing.Short() {
			return
		}

		t.Run("mockV0Provider", func(t *testing.T) {
			assert.NilError(t, wireUpSystem(ctx, newMockV0Provider(), func(ctx context.Context, s *system) {
				testFailedPodScenario(ctx, t, s)
			}))
		})
	})

	// succeededPodScenario ensures that the VK ignores succeeded pods that were succeeded prior to the PC starting up.
@@ -204,14 +178,6 @@ func TestPodLifecycle(t *testing.T) {
				testSucceededPodScenario(ctx, t, s)
			}))
		})
		if testing.Short() {
			return
		}
		t.Run("mockV0Provider", func(t *testing.T) {
			assert.NilError(t, wireUpSystem(ctx, newMockV0Provider(), func(ctx context.Context, s *system) {
				testSucceededPodScenario(ctx, t, s)
			}))
		})
	})

	// updatePodWhileRunningScenario updates a pod while the VK is running to ensure the update is propagated
@@ -224,15 +190,6 @@ func TestPodLifecycle(t *testing.T) {
			}))
		})
	})

	// podStatusMissingWhileRunningScenario waits for the pod to go into the running state, with a V0 style provider,
	// and then makes the pod disappear!
	t.Run("podStatusMissingWhileRunningScenario", func(t *testing.T) {
		mp := newMockV0Provider()
		assert.NilError(t, wireUpSystem(ctx, mp, func(ctx context.Context, s *system) {
			testPodStatusMissingWhileRunningScenario(ctx, t, s, mp)
		}))
	})
}

type testFunction func(ctx context.Context, s *system)
@@ -358,7 +315,7 @@ func testTerminalStatePodScenario(ctx context.Context, t *testing.T, s *system,
	assert.DeepEqual(t, p1, p2)
}

func testDanglingPodScenario(ctx context.Context, t *testing.T, s *system, m *mockV0Provider) {
func testDanglingPodScenario(ctx context.Context, t *testing.T, s *system, m *mockProvider) {
	t.Parallel()

	pod := newPod()
@@ -372,7 +329,6 @@ func testDanglingPodScenario(ctx context.Context, t *testing.T, s *system, m *mo
}

func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system, waitFunction func(ctx context.Context, watch watch.Interface) error) {

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
@@ -530,87 +486,6 @@ func testUpdatePodWhileRunningScenario(ctx context.Context, t *testing.T, s *sys
	assert.NilError(t, m.updates.until(ctx, func(v int) bool { return v > 0 }))
}

func testPodStatusMissingWhileRunningScenario(ctx context.Context, t *testing.T, s *system, m *mockV0Provider) {
	t.Parallel()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	p := newPod()
	key, err := buildKey(p)
	assert.NilError(t, err)

	listOptions := metav1.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("metadata.name", p.ObjectMeta.Name).String(),
	}

	watchErrCh := make(chan error)

	// Create a Pod
	_, e := s.client.CoreV1().Pods(testNamespace).Create(p)
	assert.NilError(t, e)

	// Setup a watch to check if the pod is in running
	watcher, err := s.client.CoreV1().Pods(testNamespace).Watch(listOptions)
	assert.NilError(t, err)
	defer watcher.Stop()
	go func() {
		newPod, watchErr := watchutils.UntilWithoutRetry(ctx, watcher,
			// Wait for the pod to be started
			func(ev watch.Event) (bool, error) {
				pod := ev.Object.(*corev1.Pod)
				return pod.Status.Phase == corev1.PodRunning, nil
			})
		// This deepcopy is required to please the race detector
		p = newPod.Object.(*corev1.Pod).DeepCopy()
		watchErrCh <- watchErr
	}()

	// Start the pod controller
	assert.NilError(t, s.start(ctx))

	// Wait for pod to be in running
	select {
	case <-ctx.Done():
		t.Fatalf("Context ended early: %s", ctx.Err().Error())
	case <-s.pc.Done():
		assert.NilError(t, s.pc.Err())
		t.Fatal("Pod controller exited prematurely without error")
	case err = <-watchErrCh:
		assert.NilError(t, err)

	}

	// Setup a watch to check if the pod is in failed due to provider issues
	watcher, err = s.client.CoreV1().Pods(testNamespace).Watch(listOptions)
	assert.NilError(t, err)
	defer watcher.Stop()
	go func() {
		newPod, watchErr := watchutils.UntilWithoutRetry(ctx, watcher,
			// Wait for the pod to be failed
			func(ev watch.Event) (bool, error) {
				pod := ev.Object.(*corev1.Pod)
				return pod.Status.Phase == corev1.PodFailed, nil
			})
		// This deepcopy is required to please the race detector
		p = newPod.Object.(*corev1.Pod).DeepCopy()
		watchErrCh <- watchErr
	}()

	// delete the pod from the mock provider
	m.pods.Delete(key)
	select {
	case <-ctx.Done():
		t.Fatalf("Context ended early: %s", ctx.Err().Error())
	case <-s.pc.Done():
		assert.NilError(t, s.pc.Err())
		t.Fatal("Pod controller exited prematurely without error")
	case err = <-watchErrCh:
		assert.NilError(t, err)
	}

	assert.Equal(t, p.Status.Reason, podStatusReasonNotFound)
}

func BenchmarkCreatePods(b *testing.B) {
	sl := logrus.StandardLogger()
	sl.SetLevel(logrus.ErrorLevel)