Files
virtual-kubelet/node/pod_test.go
Sargun Dhillon 1b8597647b Refactor queue code
This refactor is preparation for a follow-up commit: I want to add instrumentation
around our queues. The queue-handling code was spread throughout the code base,
which made adding such instrumentation needlessly complicated.

This centralizes the queue management logic in queue.go; a user now only needs to
provide a name, a handler, and, optionally, a custom rate limiter.
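
To make the shape of that interface concrete, here is a hypothetical usage sketch.
The queue package path, the New constructor, the handler signature, and syncPod are
illustrative assumptions based on the description above, not the actual API:

q := queue.New(
	workqueue.DefaultControllerRateLimiter(), // the (custom) rate limiter, if you want one
	"pods",                                   // the name
	func(ctx context.Context, key string) error {
		// the handler: process one item; returning an error requeues it
		return syncPod(ctx, key)
	},
)
go q.Run(ctx, 1)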

The lease code is moved into its own package to simplify testing: the goroutine
leak tester was triggering incorrectly when other tests ran in the same package,
because it also picked up leaks from those tests.
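
For illustration only (the commit does not name the leak checker), isolating the
lease tests in a dedicated package means a package-level leak check only sees that
package's goroutines. A minimal sketch, assuming go.uber.org/goleak:

package lease

import (
	"testing"

	"go.uber.org/goleak"
)

func TestMain(m *testing.M) {
	// Fails the run if goroutines started by this package's tests leak.
	// In a shared package, it would also flag goroutines from unrelated tests.
	goleak.VerifyTestMain(m)
}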

This also identified buggy behaviour:

wq := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), "test")
wq.AddRateLimited("hi")
fmt.Printf("Added hi, len: %d\n", wq.Len())

wq.Forget("hi")
fmt.Printf("Forgot hi, len: %d\n", wq.Len())

wq.Done("hi")
fmt.Printf("Done hi, len: %d\n", wq.Len())

---
This prints all 0s, because even non-delayed items are delayed. If you call Add
directly instead, the last line prints a len of 2.
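
A self-contained reproduction of both cases (a minimal sketch using the public
workqueue API, not code from this repo):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Case 1: AddRateLimited routes even "immediate" items through the
	// delaying queue, so Len reports 0 at every step.
	wq := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), "test")
	wq.AddRateLimited("hi")
	fmt.Printf("Added hi, len: %d\n", wq.Len())
	wq.Forget("hi")
	fmt.Printf("Forgot hi, len: %d\n", wq.Len())
	wq.Done("hi")
	fmt.Printf("Done hi, len: %d\n", wq.Len())

	// Case 2: Add puts the item on the queue immediately. Done on an item
	// that was never handed out by Get still finds it in the dirty set and
	// appends it to the queue again, which is why the final Len is 2.
	wq2 := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), "test2")
	wq2.Add("hi")
	wq2.Forget("hi")
	wq2.Done("hi")
	fmt.Printf("Add/Forget/Done, len: %d\n", wq2.Len())
}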

// Workqueue docs:
// Forget indicates that an item is finished being retried.  Doesn't matter whether it's for perm failing
// or for success, we'll stop the rate limiter from tracking it.  This only clears the `rateLimiter`, you
// still have to call `Done` on the queue.

^----- Even this seems untrue
2021-01-08 00:56:05 -08:00


// Copyright © 2017 The virtual-kubelet authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package node

import (
	"context"
	"fmt"
	"testing"
	"time"

	testutil "github.com/virtual-kubelet/virtual-kubelet/internal/test/util"
	"golang.org/x/time/rate"
	"gotest.tools/assert"
	is "gotest.tools/assert/cmp"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/util/workqueue"
)

type TestController struct {
	*PodController
	mock   *mockProviderAsync
	client *fake.Clientset
}

func newTestController() *TestController {
	fk8s := fake.NewSimpleClientset()

	rm := testutil.FakeResourceManager()
	p := newMockProvider()
	iFactory := kubeinformers.NewSharedInformerFactoryWithOptions(fk8s, 10*time.Minute)
	rateLimiter := workqueue.NewMaxOfRateLimiter(
		// The default upper bound is 1000 seconds. Let's not use that.
		workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 10*time.Millisecond),
		&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
	)

	podController, err := NewPodController(PodControllerConfig{
		PodClient:         fk8s.CoreV1(),
		PodInformer:       iFactory.Core().V1().Pods(),
		EventRecorder:     testutil.FakeEventRecorder(5),
		Provider:          p,
		ConfigMapInformer: iFactory.Core().V1().ConfigMaps(),
		SecretInformer:    iFactory.Core().V1().Secrets(),
		ServiceInformer:   iFactory.Core().V1().Services(),
		RateLimiter:       rateLimiter,
	})
	if err != nil {
		panic(err)
	}
	// Override the resource manager in the constructor with our own.
	podController.resourceManager = rm

	return &TestController{
		PodController: podController,
		mock:          p,
		client:        fk8s,
	}
}

// Run starts the informer and runs the pod controller
func (tc *TestController) Run(ctx context.Context, n int) error {
	go tc.podsInformer.Informer().Run(ctx.Done())
	return tc.PodController.Run(ctx, n)
}

func TestPodsEqual(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:  "nginx",
					Image: "nginx:1.15.12-perl",
					Ports: []corev1.ContainerPort{
						{
							ContainerPort: 443,
							Protocol:      "tcp",
						},
					},
				},
			},
		},
	}

	p2 := p1.DeepCopy()

	assert.Assert(t, podsEqual(p1, p2))
}

func TestPodsDifferent(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: newPodSpec(),
	}

	p2 := p1.DeepCopy()
	p2.Spec.Containers[0].Image = "nginx:1.15.12-perl"

	assert.Assert(t, !podsEqual(p1, p2))
}

func TestPodsDifferentIgnoreValue(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: newPodSpec(),
	}

	p2 := p1.DeepCopy()
	p2.Status.Phase = corev1.PodFailed

	assert.Assert(t, podsEqual(p1, p2))
}

func TestPodShouldEnqueueDifferentDeleteTimeStamp(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: newPodSpec(),
	}

	p2 := p1.DeepCopy()
	now := v1.NewTime(time.Now())
	p2.DeletionTimestamp = &now

	assert.Assert(t, podShouldEnqueue(p1, p2))
}

func TestPodShouldEnqueueDifferentLabel(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: newPodSpec(),
	}

	p2 := p1.DeepCopy()
	p2.Labels = map[string]string{"test": "test"}

	assert.Assert(t, podShouldEnqueue(p1, p2))
}

func TestPodShouldEnqueueDifferentAnnotation(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: newPodSpec(),
	}

	p2 := p1.DeepCopy()
	p2.Annotations = map[string]string{"test": "test"}

	assert.Assert(t, podShouldEnqueue(p1, p2))
}

func TestPodShouldNotEnqueueDifferentStatus(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: newPodSpec(),
	}

	p2 := p1.DeepCopy()
	p2.Status.Phase = corev1.PodSucceeded

	assert.Assert(t, !podShouldEnqueue(p1, p2))
}

func TestPodShouldEnqueueDifferentDeleteGraceTime(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: newPodSpec(),
	}

	p2 := p1.DeepCopy()
	oldTime := v1.NewTime(time.Now().Add(5))
	newTime := v1.NewTime(time.Now().Add(10))
	oldGraceTime := int64(5)
	newGraceTime := int64(10)
	p1.DeletionGracePeriodSeconds = &oldGraceTime
	p1.DeletionTimestamp = &oldTime

	p2.DeletionGracePeriodSeconds = &newGraceTime
	p2.DeletionTimestamp = &newTime

	assert.Assert(t, podShouldEnqueue(p1, p2))
}

func TestPodShouldEnqueueGraceTimeChanged(t *testing.T) {
	p1 := &corev1.Pod{
		Spec: newPodSpec(),
	}

	p2 := p1.DeepCopy()
	graceTime := int64(30)
	p2.DeletionGracePeriodSeconds = &graceTime

	assert.Assert(t, podShouldEnqueue(p1, p2))
}

func TestPodCreateNewPod(t *testing.T) {
	svr := newTestController()

	pod := &corev1.Pod{}
	pod.ObjectMeta.Namespace = "default" //nolint:goconst
	pod.ObjectMeta.Name = "nginx"        //nolint:goconst
	pod.Spec = newPodSpec()

	err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
	assert.Check(t, is.Nil(err))

	// createOrUpdate called CreatePod but did not call UpdatePod because the pod did not exist
	assert.Check(t, is.Equal(svr.mock.creates.read(), 1))
	assert.Check(t, is.Equal(svr.mock.updates.read(), 0))
}

func TestPodUpdateExisting(t *testing.T) {
	svr := newTestController()

	pod := &corev1.Pod{}
	pod.ObjectMeta.Namespace = "default"
	pod.ObjectMeta.Name = "nginx"
	pod.Spec = newPodSpec()

	err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
	assert.Check(t, is.Nil(err))
	assert.Check(t, is.Equal(svr.mock.creates.read(), 1))
	assert.Check(t, is.Equal(svr.mock.updates.read(), 0))

	pod2 := pod.DeepCopy()
	pod2.Spec.Containers[0].Image = "nginx:1.15.12-perl"

	err = svr.createOrUpdatePod(context.Background(), pod2.DeepCopy())
	assert.Check(t, is.Nil(err))

	// createOrUpdate didn't call CreatePod but did call UpdatePod because the spec changed
	assert.Check(t, is.Equal(svr.mock.creates.read(), 1))
	assert.Check(t, is.Equal(svr.mock.updates.read(), 1))
}

func TestPodNoSpecChange(t *testing.T) {
	svr := newTestController()

	pod := &corev1.Pod{}
	pod.ObjectMeta.Namespace = "default"
	pod.ObjectMeta.Name = "nginx"
	pod.Spec = newPodSpec()

	err := svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
	assert.Check(t, is.Nil(err))
	assert.Check(t, is.Equal(svr.mock.creates.read(), 1))
	assert.Check(t, is.Equal(svr.mock.updates.read(), 0))

	err = svr.createOrUpdatePod(context.Background(), pod.DeepCopy())
	assert.Check(t, is.Nil(err))

	// createOrUpdate didn't call CreatePod or UpdatePod, spec didn't change
	assert.Check(t, is.Equal(svr.mock.creates.read(), 1))
	assert.Check(t, is.Equal(svr.mock.updates.read(), 0))
}

func TestPodStatusDelete(t *testing.T) {
	ctx := context.Background()
	c := newTestController()

	pod := &corev1.Pod{}
	pod.ObjectMeta.Namespace = "default"
	pod.ObjectMeta.Name = "nginx"
	pod.Spec = newPodSpec()

	fk8s := fake.NewSimpleClientset(pod)
	c.client = fk8s
	c.PodController.client = fk8s.CoreV1()

	podCopy := pod.DeepCopy()
	deleteTime := v1.Time{Time: time.Now().Add(30 * time.Second)}
	podCopy.DeletionTimestamp = &deleteTime

	key := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
	c.knownPods.Store(key, &knownPod{lastPodStatusReceivedFromProvider: podCopy})

	// test pod in provider delete
	err := c.updatePodStatus(ctx, pod, key)
	if err != nil {
		t.Fatal("pod update failed")
	}

	newPod, err := c.client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, v1.GetOptions{})
	if err != nil && !errors.IsNotFound(err) {
		t.Fatalf("Get pod %v failed", key)
	}
	if newPod != nil && newPod.DeletionTimestamp == nil {
		t.Fatalf("Pod %v delete failed", key)
	}
	t.Logf("pod delete success")

	// test pod in provider delete with a terminated container status
	pod.DeletionTimestamp = &deleteTime
	if _, err = c.client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, v1.CreateOptions{}); err != nil {
		t.Fatal("Prepare pod in k8s failed")
	}
	podCopy.Status.ContainerStatuses = []corev1.ContainerStatus{
		{
			State: corev1.ContainerState{
				Waiting: nil,
				Running: nil,
				Terminated: &corev1.ContainerStateTerminated{
					ExitCode: 1,
					Message:  "Exit",
				},
			},
		},
	}
	c.knownPods.Store(key, &knownPod{lastPodStatusReceivedFromProvider: podCopy})

	err = c.updatePodStatus(ctx, pod, key)
	if err != nil {
		t.Fatalf("pod update failed %v", err)
	}

	newPod, err = c.client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, v1.GetOptions{})
	if err != nil && !errors.IsNotFound(err) {
		t.Fatalf("Get pod %v failed", key)
	}
	if newPod.DeletionTimestamp == nil {
		t.Fatalf("Pod %v delete failed", key)
	}
	if newPod.Status.ContainerStatuses[0].State.Terminated == nil {
		t.Fatalf("Pod status %v update failed", key)
	}
	t.Logf("pod updated, container status: %+v, pod delete Time: %v", newPod.Status.ContainerStatuses[0].State.Terminated, newPod.DeletionTimestamp)
}

func newPodSpec() corev1.PodSpec {
	return corev1.PodSpec{
		Containers: []corev1.Container{
			{
				Name:  "nginx",
				Image: "nginx:1.15.12",
				Ports: []corev1.ContainerPort{
					{
						ContainerPort: 443,
						Protocol:      "tcp",
					},
				},
			},
		},
	}
}