Merge pull request #564 from cpuguy83/fix_version_on_node_create
Fix node create after delete
Makefile (44 changed lines)

@@ -6,6 +6,7 @@ github_repo := virtual-kubelet/virtual-kubelet
 binary := virtual-kubelet
 build_tags := netgo osusergo $(VK_BUILD_TAGS)

+include Makefile.e2e

 # comment this line out for quieter things
 # V := 1 # When V is set, print commands and build progress.

@@ -119,50 +120,7 @@ format: $(GOPATH)/bin/goimports
 	$Q find . -iname \*.go | grep -v \
 		-e "^$$" $(addprefix -e ,$(IGNORED_PACKAGES)) | xargs goimports -w

-.PHONY: skaffold-validate
-skaffold-validate:
-	@if [[ ! "minikube,docker-for-desktop" =~ .*"$(kubectl_context)".* ]]; then \
-		echo current-context is [$(kubectl_context)]. Must be one of [minikube,docker-for-desktop]; false; \
-	fi
-
-.PHONY: skaffold-build
-skaffold-build: tags_with_mock := $(VK_BUILD_TAGS) mock_provider
-skaffold-build:
-	@if [[ ! "$(MODE)" == "delete" ]]; then \
-		GOOS=linux GOARCH=amd64 $(MAKE) VK_BUILD_TAGS="$(tags_with_mock)" build; \
-	fi
-
-# skaffold deploys the virtual-kubelet to the Kubernetes cluster targeted by the current kubeconfig using skaffold.
-# The current context (as indicated by "kubectl config current-context") must be one of "minikube" or "docker-for-desktop".
-# MODE must be set to one of "dev" (default), "delete" or "run", and is used as the skaffold command to be run.
-.PHONY: skaffold
-skaffold: MODE ?= dev
-skaffold: PROFILE := local
-skaffold: skaffold-validate skaffold-build
-	@skaffold $(MODE) \
-		-f $(PWD)/hack/skaffold/virtual-kubelet/skaffold.yml \
-		-p $(PROFILE)
-
-# e2e runs the end-to-end test suite against the Kubernetes cluster targeted by the current kubeconfig.
-# It automatically deploys the virtual-kubelet with the mock provider by running "make skaffold MODE=run".
-# It is the caller's responsibility to cleanup the deployment after running this target (e.g. by running "make skaffold MODE=delete").
-.PHONY: e2e
-e2e: KUBECONFIG ?= $(HOME)/.kube/config
-e2e: NAMESPACE := default
-e2e: NODE_NAME := vkubelet-mock-0
-e2e: TAINT_KEY := virtual-kubelet.io/provider
-e2e: TAINT_VALUE := mock
-e2e: TAINT_EFFECT := NoSchedule
-e2e:
-	@$(MAKE) skaffold MODE=delete && kubectl delete --ignore-not-found node $(NODE_NAME)
-	@$(MAKE) skaffold MODE=run
-	@cd $(PWD)/test/e2e && go test -v -tags e2e ./... \
-		-kubeconfig=$(KUBECONFIG) \
-		-namespace=$(NAMESPACE) \
-		-node-name=$(NODE_NAME) \
-		-taint-key=$(TAINT_KEY) \
-		-taint-value=$(TAINT_VALUE) \
-		-taint-effect=$(TAINT_EFFECT)
-
 ##### =====> Internals <===== #####

Makefile.e2e (new file, 47 lines)

@@ -0,0 +1,47 @@
.PHONY: skaffold.validate
skaffold.validate: kubectl_context := $(shell kubectl config current-context)
skaffold.validate:
	if [[ ! "minikube,docker-for-desktop,docker-desktop" =~ .*"$(kubectl_context)".* ]]; then \
		echo current-context is [$(kubectl_context)]. Must be one of [minikube,docker-for-desktop,docker-desktop]; \
		false; \
	fi

# skaffold deploys the virtual-kubelet to the Kubernetes cluster targeted by the current kubeconfig using skaffold.
# The current context (as indicated by "kubectl config current-context") must be one of "minikube" or "docker-for-desktop".
# MODE must be set to one of "dev" (default), "delete" or "run", and is used as the skaffold command to be run.
.PHONY: skaffold
skaffold: MODE ?= dev
skaffold: skaffold/$(MODE)

.PHONY: skaffold/%
skaffold/%: PROFILE := local
skaffold/%: skaffold.validate
	skaffold $(*) \
		-f $(PWD)/hack/skaffold/virtual-kubelet/skaffold.yml \
		-p $(PROFILE)

# e2e runs the end-to-end test suite against the Kubernetes cluster targeted by the current kubeconfig.
# It automatically deploys the virtual-kubelet with the mock provider by running "make skaffold MODE=run".
# It is the caller's responsibility to cleanup the deployment after running this target (e.g. by running "make skaffold MODE=delete").
.PHONY: e2e
e2e: KUBECONFIG ?= $(HOME)/.kube/config
e2e: NAMESPACE := default
e2e: NODE_NAME := vkubelet-mock-0
e2e: TAINT_KEY := virtual-kubelet.io/provider
e2e: TAINT_VALUE := mock
e2e: TAINT_EFFECT := NoSchedule
e2e: tags_with_mock := $(VK_BUILD_TAGS) mock
e2e: e2e.clean
	GOOS=linux GOARCH=amd64 $(MAKE) VK_BUILD_TAGS="$(tags_with_mock)" build; \
	$(MAKE) skaffold/run; \
	cd $(PWD)/test/e2e && go test -v -tags e2e ./... \
		-kubeconfig=$(KUBECONFIG) \
		-namespace=$(NAMESPACE) \
		-node-name=$(NODE_NAME) \
		-taint-key=$(TAINT_KEY) \
		-taint-value=$(TAINT_VALUE) \
		-taint-effect=$(TAINT_EFFECT)

.PHONY: e2e.clean
e2e.clean: #skaffold/delete
	kubectl delete --ignore-not-found node $(NODE_NAME)
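
As the comments above describe, MODE selects the skaffold sub-command and the e2e target drives a full test run against the current kubectl context. Typical invocations, assuming a local minikube or Docker Desktop cluster, might look like this (illustrative only, not part of the commit):

	make skaffold MODE=run      # deploy virtual-kubelet to the local cluster via skaffold
	make e2e                    # remove any leftover mock node, rebuild with the mock tag, deploy, and run the suite
	make skaffold MODE=delete   # tear the deployment back down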

@@ -7,11 +7,10 @@ import (
 	"testing"
 	"time"

-	"k8s.io/api/core/v1"
+	"github.com/virtual-kubelet/virtual-kubelet/vkubelet"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
-
-	"github.com/virtual-kubelet/virtual-kubelet/vkubelet"
 )

 const (

test/e2e/framework/node.go (new file, 58 lines)

@@ -0,0 +1,58 @@
package framework

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	watchapi "k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/watch"
)

// WaitUntilNodeCondition establishes a watch on the vk node.
// Then, it waits for the specified condition function to be verified.
func (f *Framework) WaitUntilNodeCondition(fn watch.ConditionFunc) error {
	// Create a field selector that matches the specified Node resource.
	fs := fields.OneTermEqualSelector("metadata.name", f.NodeName).String()
	// Create a ListWatch so we can receive events for the matched Node resource.
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fs
			return f.KubeClient.CoreV1().Nodes().List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watchapi.Interface, error) {
			options.FieldSelector = fs
			return f.KubeClient.CoreV1().Nodes().Watch(options)
		},
	}

	// Watch for updates to the Node resource until fn is satisfied, or until the timeout is reached.
	ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
	defer cancel()
	last, err := watch.UntilWithSync(ctx, lw, &corev1.Node{}, nil, fn)
	if err != nil {
		return err
	}
	if last == nil {
		return fmt.Errorf("no events received for node %q", f.NodeName)
	}
	return nil
}

// WaitUntilNodeAdded is a watch condition which waits until the VK node object
// is added.
func (f *Framework) WaitUntilNodeAdded(event watchapi.Event) (bool, error) {
	if event.Type != watchapi.Added {
		return false, nil
	}
	return event.Object.(*corev1.Node).Name == f.NodeName, nil
}

// DeleteNode deletes the vk node used by the framework.
func (f *Framework) DeleteNode() error {
	return f.KubeClient.CoreV1().Nodes().Delete(f.NodeName, nil)
}
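
WaitUntilNodeCondition accepts any watch.ConditionFunc, so tests can wait on arbitrary node state rather than only the Added event used above. A minimal sketch of such a condition, not part of this commit and assuming the same corev1 and watchapi aliases imported in node.go, might wait for the node to report Ready:

	// nodeIsReady is a hypothetical condition that reports true once the watched
	// node carries a Ready condition with status True.
	func nodeIsReady(event watchapi.Event) (bool, error) {
		node, ok := event.Object.(*corev1.Node)
		if !ok || (event.Type != watchapi.Added && event.Type != watchapi.Modified) {
			return false, nil
		}
		for _, c := range node.Status.Conditions {
			if c.Type == corev1.NodeReady && c.Status == corev1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	}

A test could pass it straight to the helper, e.g. err := f.WaitUntilNodeCondition(nodeIsReady).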

test/e2e/node_test.go (new file, 51 lines)

@@ -0,0 +1,51 @@
// +build e2e

package e2e

import (
	"testing"
	"time"

	"gotest.tools/assert"
	watchapi "k8s.io/apimachinery/pkg/watch"
)

// TestNodeCreateAfterDelete makes sure that a node is automatically recreated
// if it is deleted while VK is running.
func TestNodeCreateAfterDelete(t *testing.T) {
	chErr := make(chan error, 1)
	chDone := make(chan struct{})
	defer close(chDone)

	go func() {
		var deleted bool
		wait := func(e watchapi.Event) (bool, error) {
			select {
			case <-chDone:
				return true, nil
			default:
			}

			if deleted {
				return f.WaitUntilNodeAdded(e)
			}
			if e.Type == watchapi.Deleted {
				deleted = true
			}
			return false, nil
		}
		chErr <- f.WaitUntilNodeCondition(wait)
	}()

	assert.NilError(t, f.DeleteNode())

	timer := time.NewTimer(60 * time.Second)
	defer timer.Stop()

	select {
	case <-timer.C:
		t.Fatal("timeout waiting for node to be recreated")
	case err := <-chErr:
		assert.NilError(t, err)
	}
}

@@ -363,7 +363,9 @@ func UpdateNodeStatus(ctx context.Context, nodes v1.NodeInterface, n *corev1.Nod
 	}

 	log.G(ctx).Debug("node not found")
-	node, err = nodes.Create(n.DeepCopy())
+	newNode := n.DeepCopy()
+	newNode.ResourceVersion = ""
+	node, err = nodes.Create(newNode)
 	if err != nil {
 		return nil, err
 	}

@@ -11,6 +11,7 @@ import (
 	"gotest.tools/assert/cmp"
 	coord "k8s.io/api/coordination/v1beta1"
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	watch "k8s.io/apimachinery/pkg/watch"
 	testclient "k8s.io/client-go/kubernetes/fake"

@@ -184,6 +185,17 @@ func TestUpdateNodeStatus(t *testing.T) {
 	updated, err = UpdateNodeStatus(ctx, nodes, n.DeepCopy())
 	assert.NilError(t, err)
 	assert.Check(t, cmp.DeepEqual(n.Status, updated.Status))
+
+	err = nodes.Delete(n.Name, nil)
+	assert.NilError(t, err)
+
+	_, err = nodes.Get(n.Name, metav1.GetOptions{})
+	assert.Equal(t, errors.IsNotFound(err), true, err)
+
+	updated, err = UpdateNodeStatus(ctx, nodes, updated.DeepCopy())
+	assert.NilError(t, err)
+	_, err = nodes.Get(n.Name, metav1.GetOptions{})
+	assert.NilError(t, err)
 }

 func TestUpdateNodeLease(t *testing.T) {