Wait for the informers to sync before starting tests
If the informers are starting at the same time as createPods, we can get into a situation where the pod seems to get "lost". Instead, we wait for the informers to sync before the create-pod event. This also consolidates the tests onto a single shared informer factory as a micro-optimization.
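For context, the ordering the fix enforces looks roughly like the sketch below. This is a minimal example, not the test code itself: the helper name startAndSync and the one-minute resync period are illustrative, while the fake clientset, the shared informer factory, and the cache.WaitForCacheSync call mirror the ones in the diff.

    package informertest

    import (
    	"context"
    	"errors"
    	"time"

    	kubeinformers "k8s.io/client-go/informers"
    	"k8s.io/client-go/kubernetes/fake"
    	"k8s.io/client-go/tools/cache"
    )

    // startAndSync is a hypothetical helper showing the ordering this commit
    // enforces: start the informers, block until their caches are in sync,
    // and only afterwards let the test create pods.
    func startAndSync(ctx context.Context, client *fake.Clientset) (kubeinformers.SharedInformerFactory, error) {
    	factory := kubeinformers.NewSharedInformerFactory(client, time.Minute)
    	podInformer := factory.Core().V1().Pods()

    	// Start is non-blocking; it launches one goroutine per informer.
    	factory.Start(ctx.Done())

    	// Pods created before this wait race the informer startup and can
    	// appear "lost" to the controller under test.
    	if ok := cache.WaitForCacheSync(ctx.Done(), podInformer.Informer().HasSynced); !ok {
    		return nil, errors.New("pod informer failed to sync")
    	}
    	return factory, nil
    }

A test would then call startAndSync first and only create pods through the clientset once it returns.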
@@ -23,6 +23,7 @@ import (
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	watchutils "k8s.io/client-go/tools/watch"
 	"k8s.io/klog"
@@ -263,14 +264,11 @@ func wireUpSystem(ctx context.Context, provider PodLifecycleHandler, f testFunct
 	client := fake.NewSimpleClientset()
 
 	// This is largely copy and pasted code from the root command
-	podInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(
+	sharedInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(
 		client,
 		informerResyncPeriod,
-		kubeinformers.WithNamespace(testNamespace),
 	)
-	podInformer := podInformerFactory.Core().V1().Pods()
-
-	scmInformerFactory := kubeinformers.NewSharedInformerFactory(client, informerResyncPeriod)
+	podInformer := sharedInformerFactory.Core().V1().Pods()
 
 	eb := record.NewBroadcaster()
 	eb.StartLogging(log.G(ctx).Infof)
@@ -279,9 +277,9 @@ func wireUpSystem(ctx context.Context, provider PodLifecycleHandler, f testFunct
 		logger: log.G(ctx),
 	}
 
-	secretInformer := scmInformerFactory.Core().V1().Secrets()
-	configMapInformer := scmInformerFactory.Core().V1().ConfigMaps()
-	serviceInformer := scmInformerFactory.Core().V1().Services()
+	secretInformer := sharedInformerFactory.Core().V1().Secrets()
+	configMapInformer := sharedInformerFactory.Core().V1().ConfigMaps()
+	serviceInformer := sharedInformerFactory.Core().V1().Services()
 	sys := &system{
 		client:  client,
 		retChan: make(chan error, 1),
@@ -302,8 +300,15 @@ func wireUpSystem(ctx context.Context, provider PodLifecycleHandler, f testFunct
 		return err
 	}
 
-	go scmInformerFactory.Start(ctx.Done())
-	go podInformerFactory.Start(ctx.Done())
+	go sharedInformerFactory.Start(ctx.Done())
+	sharedInformerFactory.WaitForCacheSync(ctx.Done())
+	if ok := cache.WaitForCacheSync(ctx.Done(), podInformer.Informer().HasSynced); !ok {
+		return errors.New("podinformer failed to sync")
+	}
+
+	if err := ctx.Err(); err != nil {
+		return err
+	}
 
 	f(ctx, sys)
 
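Worth noting in this hunk: it waits twice. sharedInformerFactory.WaitForCacheSync(ctx.Done()) blocks until every informer started so far has synced and returns a map[reflect.Type]bool of results (which the test discards), while the explicit cache.WaitForCacheSync on the pod informer is what turns a failed sync into a returned error. A hypothetical alternative the commit does not use, checking the factory's return value instead (a fragment, assuming fmt is imported), would look roughly like:

    // Surface a sync failure for any informer the factory started,
    // not just the pod informer.
    for typ, ok := range sharedInformerFactory.WaitForCacheSync(ctx.Done()) {
    	if !ok {
    		return fmt.Errorf("informer %v failed to sync", typ)
    	}
    }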
@@ -389,6 +394,7 @@ func testCreateStartDeleteScenario(ctx context.Context, t *testing.T, s *system,
 	// Create the Pod
 	_, e := s.client.CoreV1().Pods(testNamespace).Create(p)
 	assert.NilError(t, e)
+	log.G(ctx).Debug("Created pod")
 
 	// This will return once
 	select {