upgrade k8s libs to 1.18.4

Author: Adrien Trouillaud
Date: 2020-06-29 13:03:50 -07:00
parent f934ded4a2
commit 845b4cd409
15 changed files with 240 additions and 233 deletions
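The bulk of this change is mechanical: in the 1.18 client libraries (client-go v0.18), every typed-client verb takes a context.Context as its first argument plus an explicit options struct (CreateOptions, UpdateOptions, GetOptions, ListOptions, DeleteOptions, PatchOptions). A minimal before/after sketch of that calling convention, using a generic kubernetes.Interface client rather than code from this repository:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createAndList shows the client-go v0.18 calling convention: a context first,
// then an explicit options value. In v0.17 these calls were Create(pod),
// List(metav1.ListOptions{}) and Delete(name, &metav1.DeleteOptions{}).
func createAndList(ctx context.Context, client kubernetes.Interface, ns string, pod *corev1.Pod) error {
	pods := client.CoreV1().Pods(ns)

	if _, err := pods.Create(ctx, pod, metav1.CreateOptions{}); err != nil {
		return err
	}
	if _, err := pods.List(ctx, metav1.ListOptions{}); err != nil {
		return err
	}
	// DeleteOptions is now passed by value rather than by pointer.
	return pods.Delete(ctx, pod.Name, metav1.DeleteOptions{})
}
```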

View File

@@ -160,7 +160,7 @@ func TestPodLifecycle(t *testing.T) {
mp.setErrorOnDelete(errors.New("random error"))
assert.NilError(t, wireUpSystem(ctx, mp, func(ctx context.Context, s *system) {
testCreateStartDeleteScenario(ctx, t, s, deletionFunc, false)
- pods, err := s.client.CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
+ pods, err := s.client.CoreV1().Pods(testNamespace).List(ctx, metav1.ListOptions{})
assert.NilError(t, err)
assert.Assert(t, is.Len(pods.Items, 1))
assert.Assert(t, pods.Items[0].DeletionTimestamp != nil)
@@ -329,7 +329,7 @@ func testTerminalStatePodScenario(ctx context.Context, t *testing.T, s *system,
p1 := newPod()
p1.Status.Phase = state
// Create the Pod
- _, e := s.client.CoreV1().Pods(testNamespace).Create(p1)
+ _, e := s.client.CoreV1().Pods(testNamespace).Create(ctx, p1, metav1.CreateOptions{})
assert.NilError(t, e)
// Start the pod controller
@@ -339,7 +339,7 @@ func testTerminalStatePodScenario(ctx context.Context, t *testing.T, s *system,
time.Sleep(10 * time.Millisecond)
}
- p2, err := s.client.CoreV1().Pods(testNamespace).Get(p1.Name, metav1.GetOptions{})
+ p2, err := s.client.CoreV1().Pods(testNamespace).Get(ctx, p1.Name, metav1.GetOptions{})
assert.NilError(t, err)
// Make sure the pods have not changed
@@ -367,7 +367,7 @@ func testDanglingPodScenarioWithDeletionTimestamp(ctx context.Context, t *testin
FieldSelector: fields.OneTermEqualSelector("metadata.name", pod.ObjectMeta.Name).String(),
}
// Setup a watch (prior to pod creation, and pod controller startup)
- watcher, err := s.client.CoreV1().Pods(testNamespace).Watch(listOptions)
+ watcher, err := s.client.CoreV1().Pods(testNamespace).Watch(ctx, listOptions)
assert.NilError(t, err)
defer watcher.Stop()
@@ -379,7 +379,7 @@ func testDanglingPodScenarioWithDeletionTimestamp(ctx context.Context, t *testin
podCopyWithDeletionTimestamp.DeletionGracePeriodSeconds = &deletionGracePeriod
deletionTimestamp := metav1.NewTime(time.Now().Add(time.Second * time.Duration(deletionGracePeriod)))
podCopyWithDeletionTimestamp.DeletionTimestamp = &deletionTimestamp
- _, e := s.client.CoreV1().Pods(testNamespace).Create(podCopyWithDeletionTimestamp)
+ _, e := s.client.CoreV1().Pods(testNamespace).Create(ctx, podCopyWithDeletionTimestamp, metav1.CreateOptions{})
assert.NilError(t, e)
// Start the pod controller
@@ -415,7 +415,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
watchErrCh := make(chan error)
// Setup a watch (prior to pod creation, and pod controller startup)
- watcher, err := s.client.CoreV1().Pods(testNamespace).Watch(listOptions)
+ watcher, err := s.client.CoreV1().Pods(testNamespace).Watch(ctx, listOptions)
assert.NilError(t, err)
defer watcher.Stop()
// This ensures that the pod is created.
@@ -432,7 +432,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
}()
// Create the Pod
- _, e := s.client.CoreV1().Pods(testNamespace).Create(p)
+ _, e := s.client.CoreV1().Pods(testNamespace).Create(ctx, p, metav1.CreateOptions{})
assert.NilError(t, e)
log.G(ctx).Debug("Created pod")
@@ -446,7 +446,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
}
// Setup a watch to check if the pod is in running
- watcher, err = s.client.CoreV1().Pods(testNamespace).Watch(listOptions)
+ watcher, err = s.client.CoreV1().Pods(testNamespace).Watch(ctx, listOptions)
assert.NilError(t, err)
defer watcher.Stop()
go func() {
@@ -471,7 +471,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
}
// Setup a watch to look for the pod eventually going away completely
- watcher2, err := s.client.CoreV1().Pods(testNamespace).Watch(listOptions)
+ watcher2, err := s.client.CoreV1().Pods(testNamespace).Watch(ctx, listOptions)
assert.NilError(t, err)
defer watcher2.Stop()
waitDeleteCh := make(chan error)
@@ -483,7 +483,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
}()
// Setup a watch prior to pod deletion
- watcher, err = s.client.CoreV1().Pods(testNamespace).Watch(listOptions)
+ watcher, err = s.client.CoreV1().Pods(testNamespace).Watch(ctx, listOptions)
assert.NilError(t, err)
defer watcher.Stop()
go func() {
@@ -493,7 +493,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
// Delete the pod via deletiontimestamp
// 1. Get the pod
- currentPod, err := s.client.CoreV1().Pods(testNamespace).Get(p.Name, metav1.GetOptions{})
+ currentPod, err := s.client.CoreV1().Pods(testNamespace).Get(ctx, p.Name, metav1.GetOptions{})
assert.NilError(t, err)
// 2. Set the pod's deletion timestamp, version, and so on
var deletionGracePeriod int64 = 10
@@ -501,7 +501,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
deletionTimestamp := metav1.NewTime(time.Now().Add(time.Second * time.Duration(deletionGracePeriod)))
currentPod.DeletionTimestamp = &deletionTimestamp
// 3. Update (overwrite) the pod
- _, err = s.client.CoreV1().Pods(testNamespace).Update(currentPod)
+ _, err = s.client.CoreV1().Pods(testNamespace).Update(ctx, currentPod, metav1.UpdateOptions{})
assert.NilError(t, err)
select {
@@ -535,11 +535,11 @@ func testUpdatePodWhileRunningScenario(ctx context.Context, t *testing.T, s *sys
watchErrCh := make(chan error)
// Create a Pod
- _, e := s.client.CoreV1().Pods(testNamespace).Create(p)
+ _, e := s.client.CoreV1().Pods(testNamespace).Create(ctx, p, metav1.CreateOptions{})
assert.NilError(t, e)
// Setup a watch to check if the pod is in running
- watcher, err := s.client.CoreV1().Pods(testNamespace).Watch(listOptions)
+ watcher, err := s.client.CoreV1().Pods(testNamespace).Watch(ctx, listOptions)
assert.NilError(t, err)
defer watcher.Stop()
go func() {
@@ -576,7 +576,7 @@ func testUpdatePodWhileRunningScenario(ctx context.Context, t *testing.T, s *sys
p.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
log.G(ctx).WithField("pod", p).Info("Updating pod")
- _, err = s.client.CoreV1().Pods(p.Namespace).Update(p)
+ _, err = s.client.CoreV1().Pods(p.Namespace).Update(ctx, p, metav1.UpdateOptions{})
assert.NilError(t, err)
assert.NilError(t, m.getUpdates().until(ctx, func(v int) bool { return v > 0 }))
}
@@ -603,7 +603,7 @@ func benchmarkCreatePods(ctx context.Context, b *testing.B, s *system) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
pod := newPod(randomizeUID, randomizeName)
- _, err := s.client.CoreV1().Pods(pod.Namespace).Create(pod)
+ _, err := s.client.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
assert.NilError(b, err)
assert.NilError(b, ctx.Err())
}
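The test updates above all follow the same shape: thread the test's context into each client call and pass the new options value. A self-contained sketch of that pattern against client-go's fake clientset; the test name and pod here are illustrative, not taken from this diff:

```go
package example

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestCreateListWithContext mirrors the migrated test calls: Create and List
// now take the test's context and an explicit options struct.
func TestCreateListWithContext(t *testing.T) {
	ctx := context.Background()
	client := fake.NewSimpleClientset()

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p1", Namespace: "default"}}

	if _, err := client.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	pods, err := client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(pods.Items) != 1 {
		t.Fatalf("expected 1 pod, got %d", len(pods.Items))
	}
}
```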

View File

@@ -235,7 +235,7 @@ func (n *NodeController) ensureNode(ctx context.Context) (err error) {
return err
}
- node, err := n.nodes.Create(n.n)
+ node, err := n.nodes.Create(ctx, n.n, metav1.CreateOptions{})
if err != nil {
return pkgerrors.Wrap(err, "error registering node with kubernetes")
}
@@ -385,18 +385,18 @@ func (n *NodeController) updateStatus(ctx context.Context, skipErrorCb bool) (er
}
func ensureLease(ctx context.Context, leases v1beta1.LeaseInterface, lease *coord.Lease) (*coord.Lease, error) {
- l, err := leases.Create(lease)
+ l, err := leases.Create(ctx, lease, metav1.CreateOptions{})
if err != nil {
switch {
case errors.IsNotFound(err):
log.G(ctx).WithError(err).Info("Node lease not supported")
return nil, err
case errors.IsAlreadyExists(err):
- if err := leases.Delete(lease.Name, nil); err != nil && !errors.IsNotFound(err) {
+ if err := leases.Delete(ctx, lease.Name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
log.G(ctx).WithError(err).Error("could not delete old node lease")
return nil, pkgerrors.Wrap(err, "old lease exists but could not delete it")
}
- l, err = leases.Create(lease)
+ l, err = leases.Create(ctx, lease, metav1.CreateOptions{})
}
}
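Lease handling keeps its create-or-recreate flow; only the Create and Delete signatures change. A hedged sketch of that flow against the coordination v1beta1 typed client (the helper name is hypothetical):

```go
package example

import (
	"context"

	coord "k8s.io/api/coordination/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	coordclient "k8s.io/client-go/kubernetes/typed/coordination/v1beta1"
)

// recreateLease creates the lease; if one already exists, it deletes the old
// lease and creates a fresh one, using the v0.18 signatures throughout.
func recreateLease(ctx context.Context, leases coordclient.LeaseInterface, lease *coord.Lease) (*coord.Lease, error) {
	l, err := leases.Create(ctx, lease, metav1.CreateOptions{})
	if err == nil || !errors.IsAlreadyExists(err) {
		return l, err
	}
	if err := leases.Delete(ctx, lease.Name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {
		return nil, err
	}
	return leases.Create(ctx, lease, metav1.CreateOptions{})
}
```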
@@ -421,7 +421,7 @@ func updateNodeLease(ctx context.Context, leases v1beta1.LeaseInterface, lease *
ctx = span.WithField(ctx, "lease.expiresSeconds", *lease.Spec.LeaseDurationSeconds)
}
- l, err := leases.Update(lease)
+ l, err := leases.Update(ctx, lease, metav1.UpdateOptions{})
if err != nil {
if errors.IsNotFound(err) {
log.G(ctx).Debug("lease not found")
@@ -570,7 +570,7 @@ func updateNodeStatus(ctx context.Context, nodes v1.NodeInterface, nodeFromProvi
var updatedNode *corev1.Node
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
- apiServerNode, err := nodes.Get(nodeFromProvider.Name, emptyGetOptions)
+ apiServerNode, err := nodes.Get(ctx, nodeFromProvider.Name, emptyGetOptions)
if err != nil {
return err
}
@@ -583,7 +583,7 @@ func updateNodeStatus(ctx context.Context, nodes v1.NodeInterface, nodeFromProvi
}
log.G(ctx).WithError(err).WithField("patch", string(patchBytes)).Debug("Generated three way patch")
- updatedNode, err = nodes.Patch(nodeFromProvider.Name, types.StrategicMergePatchType, patchBytes, "status")
+ updatedNode, err = nodes.Patch(ctx, nodeFromProvider.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
if err != nil {
// We cannot wrap this error because the kubernetes error module doesn't understand wrapping
log.G(ctx).WithField("patch", string(patchBytes)).WithError(err).Warn("Failed to patch node status")
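Patch changes shape slightly more than the other verbs: in v0.18 the context comes first, a PatchOptions value follows the payload, and any subresources move to the end as a variadic argument. A minimal sketch of patching a node's status subresource (the function name is illustrative):

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// patchNodeStatus applies a strategic-merge patch to the "status" subresource.
// In v0.17 this was nodes.Patch(name, types.StrategicMergePatchType, patch, "status").
func patchNodeStatus(ctx context.Context, nodes typedv1.NodeInterface, name string, patch []byte) (*corev1.Node, error) {
	return nodes.Patch(ctx, name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "status")
}
```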

View File

@@ -74,11 +74,11 @@ func testNodeRun(t *testing.T, enableLease bool) {
close(chErr)
}()
- nw := makeWatch(t, nodes, testNodeCopy.Name)
+ nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
nr := nw.ResultChan()
- lw := makeWatch(t, leases, testNodeCopy.Name)
+ lw := makeWatch(ctx, t, leases, testNodeCopy.Name)
defer lw.Stop()
lr := lw.ResultChan()
@@ -132,7 +132,7 @@ func testNodeRun(t *testing.T, enableLease bool) {
}
// trigger an async node status update
- n, err := nodes.Get(testNode.Name, metav1.GetOptions{})
+ n, err := nodes.Get(ctx, testNode.Name, metav1.GetOptions{})
assert.NilError(t, err)
newCondition := corev1.NodeCondition{
Type: corev1.NodeConditionType("UPDATED"),
@@ -140,7 +140,7 @@ func testNodeRun(t *testing.T, enableLease bool) {
}
n.Status.Conditions = append(n.Status.Conditions, newCondition)
- nw = makeWatch(t, nodes, testNodeCopy.Name)
+ nw = makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
nr = nw.ResultChan()
@@ -207,7 +207,7 @@ func TestNodeCustomUpdateStatusErrorHandler(t *testing.T) {
case <-node.Ready():
}
- err = nodes.Delete(node.n.Name, nil)
+ err = nodes.Delete(ctx, node.n.Name, metav1.DeleteOptions{})
assert.NilError(t, err)
testP.triggerStatusUpdate(node.n.DeepCopy())
@@ -253,7 +253,7 @@ func TestUpdateNodeStatus(t *testing.T) {
updated, err := updateNodeStatus(ctx, nodes, n.DeepCopy())
assert.Equal(t, errors.IsNotFound(err), true, err)
- _, err = nodes.Create(n)
+ _, err = nodes.Create(ctx, n, metav1.CreateOptions{})
assert.NilError(t, err)
updated, err = updateNodeStatus(ctx, nodes, n.DeepCopy())
@@ -267,10 +267,10 @@ func TestUpdateNodeStatus(t *testing.T) {
assert.NilError(t, err)
assert.Check(t, cmp.DeepEqual(n.Status, updated.Status))
- err = nodes.Delete(n.Name, nil)
+ err = nodes.Delete(ctx, n.Name, metav1.DeleteOptions{})
assert.NilError(t, err)
- _, err = nodes.Get(n.Name, metav1.GetOptions{})
+ _, err = nodes.Get(ctx, n.Name, metav1.GetOptions{})
assert.Equal(t, errors.IsNotFound(err), true, err)
_, err = updateNodeStatus(ctx, nodes, updated.DeepCopy())
@@ -289,7 +289,7 @@ func TestUpdateNodeLease(t *testing.T) {
assert.Equal(t, l.Name, lease.Name)
assert.Assert(t, cmp.DeepEqual(l.Spec.HolderIdentity, lease.Spec.HolderIdentity))
- compare, err := leases.Get(l.Name, emptyGetOptions)
+ compare, err := leases.Get(ctx, l.Name, emptyGetOptions)
assert.NilError(t, err)
assert.Equal(t, l.Spec.RenewTime.Time.Unix(), compare.Spec.RenewTime.Time.Unix())
assert.Equal(t, compare.Name, lease.Name)
@@ -397,7 +397,7 @@ func TestBeforeAnnotationsPreserved(t *testing.T) {
testNodeCreateCopy.Annotations = map[string]string{
"beforeAnnotation": "value",
}
- _, err := nodes.Create(testNodeCreateCopy)
+ _, err := nodes.Create(ctx, testNodeCreateCopy, metav1.CreateOptions{})
assert.NilError(t, err)
// We have to refer to testNodeCopy during the course of the test. testNode is modified by the node controller
@@ -417,7 +417,7 @@ func TestBeforeAnnotationsPreserved(t *testing.T) {
close(chErr)
}()
- nw := makeWatch(t, nodes, testNodeCopy.Name)
+ nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
nr := nw.ResultChan()
@@ -446,7 +446,7 @@ func TestBeforeAnnotationsPreserved(t *testing.T) {
return ok
}))
- newNode, err := nodes.Get(testNodeCopy.Name, emptyGetOptions)
+ newNode, err := nodes.Get(ctx, testNodeCopy.Name, emptyGetOptions)
assert.NilError(t, err)
assert.Assert(t, is.Contains(newNode.Annotations, "testAnnotation"))
@@ -487,7 +487,7 @@ func TestManualConditionsPreserved(t *testing.T) {
close(chErr)
}()
- nw := makeWatch(t, nodes, testNodeCopy.Name)
+ nw := makeWatch(ctx, t, nodes, testNodeCopy.Name)
defer nw.Stop()
nr := nw.ResultChan()
@@ -503,7 +503,7 @@ func TestManualConditionsPreserved(t *testing.T) {
return true
}))
- newNode, err := nodes.Get(testNodeCopy.Name, emptyGetOptions)
+ newNode, err := nodes.Get(ctx, testNodeCopy.Name, emptyGetOptions)
assert.NilError(t, err)
assert.Assert(t, is.Len(newNode.Status.Conditions, 0))
@@ -538,7 +538,7 @@ func TestManualConditionsPreserved(t *testing.T) {
return false
}))
- newNode, err = nodes.Get(testNodeCopy.Name, emptyGetOptions)
+ newNode, err = nodes.Get(ctx, testNodeCopy.Name, emptyGetOptions)
assert.NilError(t, err)
assert.Assert(t, is.Len(newNode.Status.Conditions, 1))
assert.Assert(t, is.Contains(newNode.Annotations, "testAnnotation"))
@@ -551,13 +551,13 @@ func TestManualConditionsPreserved(t *testing.T) {
Message: "This is a manually added condition. Outside of VK. It should not be removed.",
}
assert.NilError(t, retry.RetryOnConflict(retry.DefaultRetry, func() error {
- newNode, err = nodes.Get(testNodeCopy.Name, emptyGetOptions)
+ newNode, err = nodes.Get(ctx, testNodeCopy.Name, emptyGetOptions)
if err != nil {
return err
}
newNode.Annotations["manuallyAddedAnnotation"] = "value"
newNode.Status.Conditions = append(newNode.Status.Conditions, manuallyAddedCondition)
- _, err = nodes.UpdateStatus(newNode)
+ _, err = nodes.UpdateStatus(ctx, newNode, metav1.UpdateOptions{})
return err
}))
@@ -608,7 +608,7 @@ func TestManualConditionsPreserved(t *testing.T) {
}))
// Make sure that all three conditions are there.
- newNode, err = nodes.Get(testNodeCopy.Name, emptyGetOptions)
+ newNode, err = nodes.Get(ctx, testNodeCopy.Name, emptyGetOptions)
assert.NilError(t, err)
seenConditionTypes := make([]corev1.NodeConditionType, len(newNode.Status.Conditions))
for idx := range newNode.Status.Conditions {
@@ -667,13 +667,13 @@ func (tnp *testNodeProviderPing) Ping(ctx context.Context) error {
}
type watchGetter interface {
- Watch(metav1.ListOptions) (watch.Interface, error)
+ Watch(context.Context, metav1.ListOptions) (watch.Interface, error)
}
- func makeWatch(t *testing.T, wc watchGetter, name string) watch.Interface {
+ func makeWatch(ctx context.Context, t *testing.T, wc watchGetter, name string) watch.Interface {
t.Helper()
- w, err := wc.Watch(metav1.ListOptions{FieldSelector: "name=" + name})
+ w, err := wc.Watch(ctx, metav1.ListOptions{FieldSelector: "name=" + name})
assert.NilError(t, err)
return w
}
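The read-modify-write loops in these tests still rely on client-go's conflict-retry helper; only the Get and UpdateStatus signatures change. A sketch of that pattern under the v0.18 API, with a hypothetical helper that appends a status condition:

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/util/retry"
)

// setNodeCondition re-reads the node, appends a condition to its status, and
// retries on resource-version conflicts, passing the context and options
// values required by client-go v0.18.
func setNodeCondition(ctx context.Context, nodes typedv1.NodeInterface, name string, cond corev1.NodeCondition) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		node, err := nodes.Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		node.Status.Conditions = append(node.Status.Conditions, cond)
		_, err = nodes.UpdateStatus(ctx, node, metav1.UpdateOptions{})
		return err
	})
}
```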

View File

@@ -159,7 +159,7 @@ func (pc *PodController) handleProviderError(ctx context.Context, span trace.Spa
"reason": pod.Status.Reason,
})
- _, err := pc.client.Pods(pod.Namespace).UpdateStatus(pod)
+ _, err := pc.client.Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{})
if err != nil {
logger.WithError(err).Warn("Failed to update pod status")
} else {
@@ -216,7 +216,7 @@ func (pc *PodController) updatePodStatus(ctx context.Context, podFromKubernetes
// the pod status, and we should be the sole writers of the pod status, we can blind overwrite it. Therefore
// we need to copy the pod and set ResourceVersion to 0.
podFromProvider.ResourceVersion = "0"
- if _, err := pc.client.Pods(podFromKubernetes.Namespace).UpdateStatus(podFromProvider); err != nil {
+ if _, err := pc.client.Pods(podFromKubernetes.Namespace).UpdateStatus(ctx, podFromProvider, metav1.UpdateOptions{}); err != nil {
span.SetStatus(err)
return pkgerrors.Wrap(err, "error while updating pod status in kubernetes")
}
@@ -322,7 +322,7 @@ func (pc *PodController) deletePodHandler(ctx context.Context, key string) (retE
// We don't check with the provider before doing this delete. At this point, even if an outstanding pod status update
// was in progress,
- err = pc.client.Pods(namespace).Delete(name, metav1.NewDeleteOptions(0))
+ err = pc.client.Pods(namespace).Delete(ctx, name, *metav1.NewDeleteOptions(0))
if errors.IsNotFound(err) {
return nil
}
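One easy-to-miss detail in this last hunk: DeleteOptions is now passed by value, so helpers that return a pointer, such as metav1.NewDeleteOptions, get dereferenced at the call site. A small sketch with an illustrative helper name:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// forceDeletePod deletes a pod with a zero-second grace period. NewDeleteOptions
// still returns *metav1.DeleteOptions, so it is dereferenced to satisfy the
// by-value parameter introduced in client-go v0.18.
func forceDeletePod(ctx context.Context, pods typedv1.PodInterface, name string) error {
	return pods.Delete(ctx, name, *metav1.NewDeleteOptions(0))
}
```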