use shared informers and workqueue (#425)
* vendor: add vendored code
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* controller: use shared informers and a work queue
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* errors: use cpuguy83/strongerrors
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* aci: fix test that uses resource manager
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* readme: clarify skaffold run before e2e
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* cmd: use root context everywhere
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: refactor pod lifecycle management
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* e2e: fix race in test when observing deletions
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* e2e: test pod forced deletion
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* cmd: fix root context potential leak
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: rename metaKey
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: remove calls to HandleError
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* Revert "errors: use cpuguy83/strongerrors"
This reverts commit f031fc6d.
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* manager: remove redundant lister constraint
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: rename the pod event recorder
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: amend misleading comment
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* mock: add tracing
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: add tracing
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* test: observe timeouts
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* trace: remove unnecessary comments
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: limit concurrency in deleteDanglingPods
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: never store context, always pass in calls
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: remove HandleCrash and just panic
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: don't sync succeeded pods
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: ensure pod deletion from kubernetes
Signed-off-by: Paulo Pires <pjpires@gmail.com>
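The headline change above is the move from per-resource watches and hand-rolled caches to client-go shared informers feeding a rate-limited work queue. The sketch below is illustrative only and is not code from this PR: the helper names (newPodQueue, processNextItem, syncHandler) and the package name are hypothetical, but the client-go calls (SharedInformerFactory, workqueue, cache key functions) are the standard pattern the commit titles refer to.

package podqueue

import (
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
)

// newPodQueue wires a shared pod informer to a rate-limited work queue.
// Event handlers stay cheap: they only enqueue namespace/name keys.
func newPodQueue(client kubernetes.Interface, stopCh <-chan struct{}) (workqueue.RateLimitingInterface, informers.SharedInformerFactory) {
    factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
    queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

    podInformer := factory.Core().V1().Pods().Informer()
    podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
                queue.AddRateLimited(key)
            }
        },
        UpdateFunc: func(_, newObj interface{}) {
            if key, err := cache.MetaNamespaceKeyFunc(newObj); err == nil {
                queue.AddRateLimited(key)
            }
        },
        DeleteFunc: func(obj interface{}) {
            // DeletionHandlingMetaNamespaceKeyFunc copes with tombstones.
            if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
                queue.AddRateLimited(key)
            }
        },
    })

    factory.Start(stopCh)            // run all requested informers
    factory.WaitForCacheSync(stopCh) // block until the local caches are primed
    return queue, factory
}

// processNextItem drains one key from the queue; syncHandler would reconcile
// a single namespace/name key. Returns false once the queue shuts down.
func processNextItem(queue workqueue.RateLimitingInterface, syncHandler func(key string) error) bool {
    item, shutdown := queue.Get()
    if shutdown {
        return false
    }
    defer queue.Done(item)

    key := item.(string)
    if err := syncHandler(key); err != nil {
        queue.AddRateLimited(key) // retry with backoff
        return true
    }
    queue.Forget(key)
    return true
}

The important property is that event handlers only enqueue keys while reconciliation happens in worker goroutines reading from informer-backed listers; this is also why the ResourceManager in the diff below shrinks to a thin passthrough over listers.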
Committed by: Robbie Zhang
Parent: 0e9cfca585
Commit: 28a757f4da
@@ -2,321 +2,50 @@ package manager
 
 import (
-    "sync"
-    "time"
-
-    "github.com/pkg/errors"
     "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/client-go/kubernetes"
+    "k8s.io/apimachinery/pkg/labels"
+    corev1listers "k8s.io/client-go/listers/core/v1"
+
+    "github.com/virtual-kubelet/virtual-kubelet/log"
 )
 
-// ResourceManager works a cache for pods assigned to this virtual node within Kubernetes.
-// New ResourceManagers should be created with the NewResourceManager() function.
+// ResourceManager acts as a passthrough to a cache (lister) for pods assigned to the current node.
+// It is also a passthrough to a cache (lister) for Kubernetes secrets and config maps.
 type ResourceManager struct {
-    sync.RWMutex
-    k8sClient kubernetes.Interface
-
-    pods         map[string]*v1.Pod
-    deletingPods map[string]*v1.Pod
-    configMapRef map[string]int64
-    configMaps   map[string]*v1.ConfigMap
-    secretRef    map[string]int64
-    secrets      map[string]*v1.Secret
+    podLister       corev1listers.PodLister
+    secretLister    corev1listers.SecretLister
+    configMapLister corev1listers.ConfigMapLister
 }
 
 // NewResourceManager returns a ResourceManager with the internal maps initialized.
-func NewResourceManager(k8sClient kubernetes.Interface) (*ResourceManager, error) {
+func NewResourceManager(podLister corev1listers.PodLister, secretLister corev1listers.SecretLister, configMapLister corev1listers.ConfigMapLister) (*ResourceManager, error) {
     rm := ResourceManager{
-        pods:         make(map[string]*v1.Pod, 0),
-        deletingPods: make(map[string]*v1.Pod, 0),
-        configMapRef: make(map[string]int64, 0),
-        secretRef:    make(map[string]int64, 0),
-        configMaps:   make(map[string]*v1.ConfigMap, 0),
-        secrets:      make(map[string]*v1.Secret, 0),
-        k8sClient:    k8sClient,
+        podLister:       podLister,
+        secretLister:    secretLister,
+        configMapLister: configMapLister,
     }
-
-    configW, err := rm.k8sClient.CoreV1().ConfigMaps(v1.NamespaceAll).Watch(metav1.ListOptions{})
-    if err != nil {
-        return nil, errors.Wrap(err, "error getting config watch")
-    }
-
-    secretsW, err := rm.k8sClient.CoreV1().Secrets(v1.NamespaceAll).Watch(metav1.ListOptions{})
-    if err != nil {
-        return nil, errors.Wrap(err, "error getting secrets watch")
-    }
-
-    go rm.watchConfigMaps(configW)
-    go rm.watchSecrets(secretsW)
-
-    tick := time.Tick(5 * time.Minute)
-    go func() {
-        for range tick {
-            rm.Lock()
-            for n, c := range rm.secretRef {
-                if c <= 0 {
-                    delete(rm.secretRef, n)
-                }
-            }
-            for n := range rm.secrets {
-                if _, ok := rm.secretRef[n]; !ok {
-                    delete(rm.secrets, n)
-                }
-            }
-            for n, c := range rm.configMapRef {
-                if c <= 0 {
-                    delete(rm.configMapRef, n)
-                }
-            }
-            for n := range rm.configMaps {
-                if _, ok := rm.configMapRef[n]; !ok {
-                    delete(rm.configMaps, n)
-                }
-            }
-            rm.Unlock()
-        }
-    }()
-
     return &rm, nil
 }
 
-// SetPods clears the internal cache and populates it with the supplied pods.
-func (rm *ResourceManager) SetPods(pods *v1.PodList) {
-    rm.Lock()
-    defer rm.Unlock()
-
-    for k, p := range pods.Items {
-        podKey := rm.getStoreKey(p.Namespace, p.Name)
-        if p.DeletionTimestamp != nil {
-            rm.deletingPods[podKey] = &pods.Items[k]
-        } else {
-            rm.pods[podKey] = &pods.Items[k]
-            rm.incrementRefCounters(&p)
-        }
-    }
-}
-
-// UpdatePod updates the supplied pod in the cache.
-func (rm *ResourceManager) UpdatePod(p *v1.Pod) bool {
-    rm.Lock()
-    defer rm.Unlock()
-
-    podKey := rm.getStoreKey(p.Namespace, p.Name)
-    if p.DeletionTimestamp != nil {
-        if old, ok := rm.pods[podKey]; ok {
-            rm.deletingPods[podKey] = p
-
-            rm.decrementRefCounters(old)
-            delete(rm.pods, podKey)
-
-            return true
-        }
-
-        if _, ok := rm.deletingPods[podKey]; !ok {
-            return true
-        }
-
-        return false
-    }
-
-    if old, ok := rm.pods[podKey]; ok {
-        rm.decrementRefCounters(old)
-        rm.pods[podKey] = p
-        rm.incrementRefCounters(p)
-
-        // NOTE(junjiez): no reconcile as we don't support update pod.
-        return false
-    }
-
-    rm.pods[podKey] = p
-    rm.incrementRefCounters(p)
-
-    return true
-}
-
-// DeletePod removes the pod from the cache.
-func (rm *ResourceManager) DeletePod(p *v1.Pod) bool {
-    rm.Lock()
-    defer rm.Unlock()
-
-    podKey := rm.getStoreKey(p.Namespace, p.Name)
-    if old, ok := rm.pods[podKey]; ok {
-        rm.decrementRefCounters(old)
-        delete(rm.pods, podKey)
-        return true
-    }
-
-    if _, ok := rm.deletingPods[podKey]; ok {
-        delete(rm.deletingPods, podKey)
-    }
-
-    return false
-}
-
-// GetPod retrieves the specified pod from the cache. It returns nil if a pod is not found.
-func (rm *ResourceManager) GetPod(namespace, name string) *v1.Pod {
-    rm.RLock()
-    defer rm.RUnlock()
-
-    if p, ok := rm.pods[rm.getStoreKey(namespace, name)]; ok {
-        return p
-    }
-
-    return nil
-}
-
 // GetPods returns a list of all known pods assigned to this virtual node.
 func (rm *ResourceManager) GetPods() []*v1.Pod {
-    rm.RLock()
-    defer rm.RUnlock()
-
-    pods := make([]*v1.Pod, 0, len(rm.pods)+len(rm.deletingPods))
-    for _, p := range rm.pods {
-        pods = append(pods, p)
+    l, err := rm.podLister.List(labels.Everything())
+    if err == nil {
+        return l
     }
-    for _, p := range rm.deletingPods {
-        pods = append(pods, p)
-    }
-
-    return pods
+    log.L.Errorf("failed to fetch pods from lister: %v", err)
+    return make([]*v1.Pod, 0)
 }
 
-// GetConfigMap returns the specified ConfigMap from Kubernetes. It retrieves it from cache if there
+// GetConfigMap retrieves the specified config map from the cache.
 func (rm *ResourceManager) GetConfigMap(name, namespace string) (*v1.ConfigMap, error) {
-    rm.Lock()
-    defer rm.Unlock()
-
-    configMapKey := rm.getStoreKey(namespace, name)
-    if cm, ok := rm.configMaps[configMapKey]; ok {
-        return cm, nil
-    }
-
-    var opts metav1.GetOptions
-    cm, err := rm.k8sClient.CoreV1().ConfigMaps(namespace).Get(name, opts)
-    if err != nil {
-        return nil, err
-    }
-    rm.configMaps[configMapKey] = cm
-
-    return cm, err
+    return rm.configMapLister.ConfigMaps(namespace).Get(name)
 }
 
-// GetSecret returns the specified ConfigMap from Kubernetes. It retrieves it from cache if there
+// GetSecret retrieves the specified secret from Kubernetes.
 func (rm *ResourceManager) GetSecret(name, namespace string) (*v1.Secret, error) {
-    rm.Lock()
-    defer rm.Unlock()
-
-    secretkey := rm.getStoreKey(namespace, name)
-    if secret, ok := rm.secrets[secretkey]; ok {
-        return secret, nil
-    }
-
-    var opts metav1.GetOptions
-    secret, err := rm.k8sClient.CoreV1().Secrets(namespace).Get(name, opts)
-    if err != nil {
-        return nil, err
-    }
-    rm.secrets[secretkey] = secret
-
-    return secret, err
-}
-
-// watchConfigMaps monitors the kubernetes API for modifications and deletions of configmaps
-// it evicts them from the internal cache
-func (rm *ResourceManager) watchConfigMaps(w watch.Interface) {
-
-    for {
-        select {
-        case ev, ok := <-w.ResultChan():
-            if !ok {
-                return
-            }
-
-            rm.Lock()
-            configMapkey := rm.getStoreKey(ev.Object.(*v1.ConfigMap).Namespace, ev.Object.(*v1.ConfigMap).Name)
-            switch ev.Type {
-            case watch.Modified:
-                delete(rm.configMaps, configMapkey)
-            case watch.Deleted:
-                delete(rm.configMaps, configMapkey)
-            }
-            rm.Unlock()
-        }
-    }
-}
-
-// watchSecretes monitors the kubernetes API for modifications and deletions of secrets
-// it evicts them from the internal cache
-func (rm *ResourceManager) watchSecrets(w watch.Interface) {
-
-    for {
-        select {
-        case ev, ok := <-w.ResultChan():
-            if !ok {
-                return
-            }
-
-            rm.Lock()
-            secretKey := rm.getStoreKey(ev.Object.(*v1.Secret).Namespace, ev.Object.(*v1.Secret).Name)
-            switch ev.Type {
-            case watch.Modified:
-                delete(rm.secrets, secretKey)
-            case watch.Deleted:
-                delete(rm.secrets, secretKey)
-            }
-            rm.Unlock()
-        }
-    }
-}
-
-func (rm *ResourceManager) incrementRefCounters(p *v1.Pod) {
-    for _, c := range p.Spec.Containers {
-        for _, e := range c.Env {
-            if e.ValueFrom != nil && e.ValueFrom.ConfigMapKeyRef != nil {
-                configMapKey := rm.getStoreKey(p.Namespace, e.ValueFrom.ConfigMapKeyRef.Name)
-                rm.configMapRef[configMapKey]++
-            }
-
-            if e.ValueFrom != nil && e.ValueFrom.SecretKeyRef != nil {
-                secretKey := rm.getStoreKey(p.Namespace, e.ValueFrom.SecretKeyRef.Name)
-                rm.secretRef[secretKey]++
-            }
-        }
-    }
-
-    for _, v := range p.Spec.Volumes {
-        if v.VolumeSource.Secret != nil {
-            secretKey := rm.getStoreKey(p.Namespace, v.VolumeSource.Secret.SecretName)
-            rm.secretRef[secretKey]++
-        }
-    }
-}
-
-func (rm *ResourceManager) decrementRefCounters(p *v1.Pod) {
-    for _, c := range p.Spec.Containers {
-        for _, e := range c.Env {
-            if e.ValueFrom != nil && e.ValueFrom.ConfigMapKeyRef != nil {
-                configMapKey := rm.getStoreKey(p.Namespace, e.ValueFrom.ConfigMapKeyRef.Name)
-                rm.configMapRef[configMapKey]--
-            }
-
-            if e.ValueFrom != nil && e.ValueFrom.SecretKeyRef != nil {
-                secretKey := rm.getStoreKey(p.Namespace, e.ValueFrom.SecretKeyRef.Name)
-                rm.secretRef[secretKey]--
-            }
-        }
-    }
-
-    for _, v := range p.Spec.Volumes {
-        if v.VolumeSource.Secret != nil {
-            secretKey := rm.getStoreKey(p.Namespace, v.VolumeSource.Secret.SecretName)
-            rm.secretRef[secretKey]--
-        }
-    }
-}
-
-// getStoreKey return the key with namespace for store objects from different namespaces
-func (rm *ResourceManager) getStoreKey(namespace, name string) string {
-    return namespace + "_" + name
+    return rm.secretLister.Secrets(namespace).Get(name)
 }
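Since NewResourceManager now accepts listers instead of a client, callers are expected to derive those listers from shared informers. Below is a minimal sketch of that wiring, assuming the import path github.com/virtual-kubelet/virtual-kubelet/manager and a hypothetical helper and package name; the actual wiring in this PR lives in the command/controller setup and may differ.

package setup

import (
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"

    "github.com/virtual-kubelet/virtual-kubelet/manager"
)

// buildResourceManager is an illustrative helper (not part of the PR): it
// obtains the three listers NewResourceManager expects from a single shared
// informer factory so they are all backed by the same caches.
func buildResourceManager(client kubernetes.Interface, stopCh <-chan struct{}) (*manager.ResourceManager, error) {
    factory := informers.NewSharedInformerFactory(client, 10*time.Minute)

    podLister := factory.Core().V1().Pods().Lister()
    secretLister := factory.Core().V1().Secrets().Lister()
    configMapLister := factory.Core().V1().ConfigMaps().Lister()

    // Start the informers and wait for their caches to fill before the
    // listers are used; otherwise GetPods/GetSecret/GetConfigMap would
    // read from empty caches.
    factory.Start(stopCh)
    factory.WaitForCacheSync(stopCh)

    return manager.NewResourceManager(podLister, secretLister, configMapLister)
}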
@@ -3,100 +3,156 @@ package manager
 import (
     "testing"
 
-    "github.com/google/uuid"
     "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/types"
-    "k8s.io/client-go/kubernetes"
-    "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    corev1listers "k8s.io/client-go/listers/core/v1"
+    "k8s.io/client-go/tools/cache"
 )
 
-var (
-    fakeClient kubernetes.Interface
-)
+// TestGetPods verifies that the resource manager acts as a passthrough to a pod lister.
+func TestGetPods(t *testing.T) {
+    var (
+        lsPods = []*v1.Pod{
+            makePod("namespace-0", "name-0", "image-0"),
+            makePod("namespace-1", "name-1", "image-1"),
+        }
+    )
 
-func init() {
-    fakeClient = fake.NewSimpleClientset()
-}
+    // Create a pod lister that will list the pods defined above.
+    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    for _, pod := range lsPods {
+        indexer.Add(pod)
+    }
+    podLister := corev1listers.NewPodLister(indexer)
 
-func TestResourceManager(t *testing.T) {
-    pm, err := NewResourceManager(fakeClient)
+    // Create a new instance of the resource manager based on the pod lister.
+    rm, err := NewResourceManager(podLister, nil, nil)
     if err != nil {
         t.Fatal(err)
     }
-    pod1Name := "Pod1"
-    pod1Namespace := "Pod1Namespace"
-    pod1 := makePod(pod1Namespace, pod1Name)
-    pm.UpdatePod(pod1)
 
-    pods := pm.GetPods()
-    if len(pods) != 1 {
-        t.Errorf("Got %d, expected 1 pod", len(pods))
-    }
-    gotPod1 := pm.GetPod(pod1Namespace, pod1Name)
-    if gotPod1.Name != pod1.Name {
-        t.Errorf("Got %s, wanted %s", gotPod1.Name, pod1.Name)
+    // Check that the resource manager returns two pods in the call to "GetPods".
+    rmPods := rm.GetPods()
+    if len(rmPods) != len(lsPods) {
+        t.Fatalf("expected %d pods, found %d", len(lsPods), len(rmPods))
     }
 }
 
-func TestResourceManagerDeletePod(t *testing.T) {
-    pm, err := NewResourceManager(fakeClient)
+// TestGetSecret verifies that the resource manager acts as a passthrough to a secret lister.
+func TestGetSecret(t *testing.T) {
+    var (
+        lsSecrets = []*v1.Secret{
+            makeSecret("namespace-0", "name-0", "key-0", "val-0"),
+            makeSecret("namespace-1", "name-1", "key-1", "val-1"),
+        }
+    )
+
+    // Create a secret lister that will list the secrets defined above.
+    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    for _, secret := range lsSecrets {
+        indexer.Add(secret)
+    }
+    secretLister := corev1listers.NewSecretLister(indexer)
+
+    // Create a new instance of the resource manager based on the secret lister.
+    rm, err := NewResourceManager(nil, secretLister, nil)
     if err != nil {
         t.Fatal(err)
     }
-    pod1Name := "Pod1"
-    pod1Namespace := "Pod1Namespace"
-    pod1 := makePod(pod1Namespace, pod1Name)
-    pm.UpdatePod(pod1)
-    pods := pm.GetPods()
-    if len(pods) != 1 {
-        t.Errorf("Got %d, expected 1 pod", len(pods))
-    }
-    pm.DeletePod(pod1)
 
-    pods = pm.GetPods()
-    if len(pods) != 0 {
-        t.Errorf("Got %d, expected 0 pods", len(pods))
-    }
-}
-func makePod(namespace, name string) *v1.Pod {
-    pod := &v1.Pod{}
-    pod.Name = name
-    pod.Namespace = namespace
-    pod.UID = types.UID(uuid.New().String())
-    return pod
-}
-
-func TestResourceManagerUpdatePod(t *testing.T) {
-    pm, err := NewResourceManager(fakeClient)
+    // Get the secret with coordinates "namespace-0/name-0".
+    secret, err := rm.GetSecret("name-0", "namespace-0")
     if err != nil {
         t.Fatal(err)
     }
-    pod1Name := "Pod1"
-    pod1Namespace := "Pod1Namespace"
-    pod1 := makePod(pod1Namespace, pod1Name)
-    pm.UpdatePod(pod1)
-
-    pods := pm.GetPods()
-    if len(pods) != 1 {
-        t.Errorf("Got %d, expected 1 pod", len(pods))
-    }
-    gotPod1 := pm.GetPod(pod1Namespace, pod1Name)
-    if gotPod1.Name != pod1.Name {
-        t.Errorf("Got %s, wanted %s", gotPod1.Name, pod1.Name)
+    value := secret.Data["key-0"]
+    if string(value) != "val-0" {
+        t.Fatal("got unexpected value", string(value))
     }
 
-    if gotPod1.Namespace != pod1.Namespace {
-        t.Errorf("Got %s, wanted %s", gotPod1.Namespace, pod1.Namespace)
-    }
-    pod1.Namespace = "POD2NAMESPACE"
-    pm.UpdatePod(pod1)
-
-    gotPod1 = pm.GetPod(pod1Namespace, pod1Name)
-    if gotPod1.Name != pod1.Name {
-        t.Errorf("Got %s, wanted %s", gotPod1.Name, pod1.Name)
-    }
-
-    if gotPod1.Namespace != pod1.Namespace {
-        t.Errorf("Got %s, wanted %s", gotPod1.Namespace, pod1.Namespace)
+    // Try to get a secret that does not exist, and make sure we've got a "not found" error as a response.
+    _, err = rm.GetSecret("name-X", "namespace-X")
+    if err == nil || !errors.IsNotFound(err) {
+        t.Fatalf("expected a 'not found' error, got %v", err)
     }
 }
 
+// TestGetConfigMap verifies that the resource manager acts as a passthrough to a config map lister.
+func TestGetConfigMap(t *testing.T) {
+    var (
+        lsConfigMaps = []*v1.ConfigMap{
+            makeConfigMap("namespace-0", "name-0", "key-0", "val-0"),
+            makeConfigMap("namespace-1", "name-1", "key-1", "val-1"),
+        }
+    )
+
+    // Create a config map lister that will list the config maps defined above.
+    indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+    for _, secret := range lsConfigMaps {
+        indexer.Add(secret)
+    }
+    configMapLister := corev1listers.NewConfigMapLister(indexer)
+
+    // Create a new instance of the resource manager based on the config map lister.
+    rm, err := NewResourceManager(nil, nil, configMapLister)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // Get the config map with coordinates "namespace-0/name-0".
+    configMap, err := rm.GetConfigMap("name-0", "namespace-0")
+    if err != nil {
+        t.Fatal(err)
+    }
+    value := configMap.Data["key-0"]
+    if value != "val-0" {
+        t.Fatal("got unexpected value", string(value))
+    }
+
+    // Try to get a configmap that does not exist, and make sure we've got a "not found" error as a response.
+    _, err = rm.GetConfigMap("name-X", "namespace-X")
+    if err == nil || !errors.IsNotFound(err) {
+        t.Fatalf("expected a 'not found' error, got %v", err)
+    }
+}
+
+func makeConfigMap(namespace, name, key, value string) *v1.ConfigMap {
+    return &v1.ConfigMap{
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: namespace,
+            Name:      name,
+        },
+        Data: map[string]string{
+            key: value,
+        },
+    }
+}
+
+func makePod(namespace, name, image string) *v1.Pod {
+    return &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: namespace,
+            Name:      name,
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Image: image,
+                },
+            },
+        },
+    }
+}
+
+func makeSecret(namespace, name, key, value string) *v1.Secret {
+    return &v1.Secret{
+        ObjectMeta: metav1.ObjectMeta{
+            Namespace: namespace,
+            Name:      name,
+        },
+        Data: map[string][]byte{
+            key: []byte(value),
+        },
+    }
+}
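The new tests build listers by hand from a cache.NewIndexer, which keeps them independent of any clientset. An alternative, sketched below under the assumption that it lives in the same manager package, is to drive the same listers from a fake clientset through an informer factory; this is not part of the PR, just a common variation when a test also needs a working client.

package manager

import (
    "testing"

    "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
)

// TestGetPodsViaFakeClient is an illustrative alternative to the indexer-based
// setup above: the pod lister comes from an informer factory backed by a fake
// clientset. Sketch only; not code from this PR.
func TestGetPodsViaFakeClient(t *testing.T) {
    client := fake.NewSimpleClientset(
        &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "namespace-0", Name: "name-0"}},
    )

    // A resync period of 0 disables periodic resync; the fake clientset
    // still supports list/watch, so the informer cache gets populated.
    factory := informers.NewSharedInformerFactory(client, 0)
    podLister := factory.Core().V1().Pods().Lister()

    stopCh := make(chan struct{})
    defer close(stopCh)
    factory.Start(stopCh)
    factory.WaitForCacheSync(stopCh)

    rm, err := NewResourceManager(podLister, nil, nil)
    if err != nil {
        t.Fatal(err)
    }
    if got := len(rm.GetPods()); got != 1 {
        t.Fatalf("expected 1 pod, found %d", got)
    }
}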