Remove sync provider support
This removes the legacy sync provider interface. All new providers are expected to implement the async NotifyPods interface. The legacy sync provider interface complicates the pod deletion flow, and maintaining mixed sync and async APIs blocks us from evolving the functionality. This collapses the NotifyPods interface into the PodLifecycleHandler interface.
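For orientation, here is a minimal sketch of what the consolidated provider interface looks like after this change. The method set follows the upstream virtual-kubelet PodLifecycleHandler as commonly documented, but the signatures below are illustrative, not the repository's authoritative definitions:

package node

import (
    "context"

    corev1 "k8s.io/api/core/v1"
)

// PodLifecycleHandler is the single interface a provider implements.
// With the sync interface removed, the async NotifyPods hook lives
// here directly instead of on a separate optional interface.
type PodLifecycleHandler interface {
    CreatePod(ctx context.Context, pod *corev1.Pod) error
    UpdatePod(ctx context.Context, pod *corev1.Pod) error
    DeletePod(ctx context.Context, pod *corev1.Pod) error
    GetPod(ctx context.Context, namespace, name string) (*corev1.Pod, error)
    GetPodStatus(ctx context.Context, namespace, name string) (*corev1.PodStatus, error)
    GetPods(ctx context.Context) ([]*corev1.Pod, error)

    // NotifyPods asks the provider to invoke cb whenever a pod's
    // status changes, replacing sync-style status polling.
    NotifyPods(ctx context.Context, cb func(*corev1.Pod))
}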
node/pod_test.go | 103 deletions
@@ -225,109 +225,6 @@ func TestPodDelete(t *testing.T) {
 }
 }

-func TestFetchPodStatusFromProvider(t *testing.T) {
-    startedAt := metav1.NewTime(time.Now())
-    finishedAt := metav1.NewTime(startedAt.Add(time.Second * 10))
-    containerStateRunning := &corev1.ContainerStateRunning{StartedAt: startedAt}
-    containerStateTerminated := &corev1.ContainerStateTerminated{StartedAt: startedAt, FinishedAt: finishedAt}
-    containerStateWaiting := &corev1.ContainerStateWaiting{}
-
-    testCases := []struct {
-        desc               string
-        running            *corev1.ContainerStateRunning
-        terminated         *corev1.ContainerStateTerminated
-        waiting            *corev1.ContainerStateWaiting
-        expectedStartedAt  metav1.Time
-        expectedFinishedAt metav1.Time
-    }{
-        {desc: "container in running state", running: containerStateRunning, expectedStartedAt: startedAt},
-        {desc: "container in terminated state", terminated: containerStateTerminated, expectedStartedAt: startedAt, expectedFinishedAt: finishedAt},
-        {desc: "container in waiting state", waiting: containerStateWaiting},
-    }
-
-    for _, tc := range testCases {
-        t.Run(tc.desc, func(t *testing.T) {
-            c := newTestController()
-            pod := &corev1.Pod{}
-            pod.ObjectMeta.Namespace = "default"
-            pod.ObjectMeta.Name = "nginx"
-            pod.Status.Phase = corev1.PodRunning
-            containerStatus := corev1.ContainerStatus{}
-            if tc.running != nil {
-                containerStatus.State.Running = tc.running
-            } else if tc.terminated != nil {
-                containerStatus.State.Terminated = tc.terminated
-            } else if tc.waiting != nil {
-                containerStatus.State.Waiting = tc.waiting
-            }
-            pod.Status.ContainerStatuses = []corev1.ContainerStatus{containerStatus}
-
-            pc := c.client.CoreV1().Pods("default")
-            p, err := pc.Create(pod)
-            assert.NilError(t, err)
-
-            ctx := context.Background()
-            updatedPod, err := c.fetchPodStatusFromProvider(ctx, nil, p)
-            assert.NilError(t, err)
-
-            assert.Equal(t, updatedPod.Status.Phase, corev1.PodFailed)
-            assert.Assert(t, is.Len(updatedPod.Status.ContainerStatuses, 1))
-            assert.Assert(t, updatedPod.Status.ContainerStatuses[0].State.Running == nil)
-
-            // Test cases for running and terminated container state
-            if tc.running != nil || tc.terminated != nil {
-                // Ensure that the container is in terminated state and other states are nil
-                assert.Assert(t, updatedPod.Status.ContainerStatuses[0].State.Terminated != nil)
-                assert.Assert(t, updatedPod.Status.ContainerStatuses[0].State.Waiting == nil)
-
-                terminated := updatedPod.Status.ContainerStatuses[0].State.Terminated
-                assert.Equal(t, terminated.StartedAt, tc.expectedStartedAt)
-                assert.Assert(t, terminated.StartedAt.Before(&terminated.FinishedAt))
-                if tc.terminated != nil {
-                    assert.Equal(t, terminated.FinishedAt, tc.expectedFinishedAt)
-                }
-            } else {
-                // Test case for waiting container state
-                assert.Assert(t, updatedPod.Status.ContainerStatuses[0].State.Terminated == nil)
-                assert.Assert(t, updatedPod.Status.ContainerStatuses[0].State.Waiting != nil)
-            }
-        })
-    }
-}
-
-func TestFetchPodStatusFromProviderWithExpiredPod(t *testing.T) {
-    c := newTestController()
-    pod := &corev1.Pod{}
-    pod.ObjectMeta.Namespace = "default"
-    pod.ObjectMeta.Name = "nginx"
-    pod.Status.Phase = corev1.PodRunning
-    containerStatus := corev1.ContainerStatus{}
-
-    // We should terminate containers in a pod that has not provided pod status update for more than a minute
-    startedAt := time.Now().Add(-(time.Minute + time.Second))
-    containerStatus.State.Running = &corev1.ContainerStateRunning{StartedAt: metav1.NewTime(startedAt)}
-    pod.ObjectMeta.CreationTimestamp.Time = startedAt
-    pod.Status.ContainerStatuses = []corev1.ContainerStatus{containerStatus}
-
-    pc := c.client.CoreV1().Pods("default")
-    p, err := pc.Create(pod)
-    assert.NilError(t, err)
-
-    ctx := context.Background()
-    updatedPod, err := c.fetchPodStatusFromProvider(ctx, nil, p)
-    assert.NilError(t, err)
-
-    assert.Equal(t, updatedPod.Status.Phase, corev1.PodFailed)
-    assert.Assert(t, is.Len(updatedPod.Status.ContainerStatuses, 1))
-    assert.Assert(t, updatedPod.Status.ContainerStatuses[0].State.Running == nil)
-    assert.Assert(t, updatedPod.Status.ContainerStatuses[0].State.Terminated != nil)
-    assert.Assert(t, updatedPod.Status.ContainerStatuses[0].State.Waiting == nil)
-
-    terminated := updatedPod.Status.ContainerStatuses[0].State.Terminated
-    assert.Equal(t, terminated.StartedAt, metav1.NewTime(startedAt))
-    assert.Assert(t, terminated.StartedAt.Before(&terminated.FinishedAt))
-}
-
 func newPodSpec() corev1.PodSpec {
     return corev1.PodSpec{
         Containers: []corev1.Container{