use shared informers and workqueue (#425)
* vendor: add vendored code
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* controller: use shared informers and a work queue (see the sketch after this list)
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* errors: use cpuguy83/strongerrors
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* aci: fix test that uses resource manager
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* readme: clarify skaffold run before e2e
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* cmd: use root context everywhere
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: refactor pod lifecycle management
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* e2e: fix race in test when observing deletions
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* e2e: test pod forced deletion
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* cmd: fix root context potential leak
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: rename metaKey
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: remove calls to HandleError
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* Revert "errors: use cpuguy83/strongerrors"
This reverts commit f031fc6d.
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* manager: remove redundant lister constraint
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: rename the pod event recorder
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: amend misleading comment
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* mock: add tracing
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: add tracing
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* test: observe timeouts
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* trace: remove unnecessary comments
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: limit concurrency in deleteDanglingPods (see the sketch after this list)
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: never store context, always pass in calls
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: remove HandleCrash and just panic
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: don't sync succeeded pods
Signed-off-by: Paulo Pires <pjpires@gmail.com>
* sync: ensure pod deletion from kubernetes
Signed-off-by: Paulo Pires <pjpires@gmail.com>
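
The sketch below illustrates the client-go pattern adopted by the "controller: use shared informers and a work queue" commit: a shared informer factory feeds pod events into a rate-limited work queue that a worker drains. It is a minimal illustration under assumptions, not code from this PR; the in-cluster config, the 30-second resync period, and the worker that merely prints each key are placeholders.

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
)

func main() {
    // Assumption: running in-cluster; the real command builds its client from flags.
    cfg, err := rest.InClusterConfig()
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // One shared informer factory backs all listers (pods, secrets, config maps).
    factory := informers.NewSharedInformerFactory(client, 30*time.Second)
    podInformer := factory.Core().V1().Pods()

    // The work queue decouples event delivery from reconciliation and adds retries.
    queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

    podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
                queue.Add(key)
            }
        },
        UpdateFunc: func(_, newObj interface{}) {
            if key, err := cache.MetaNamespaceKeyFunc(newObj); err == nil {
                queue.Add(key)
            }
        },
        DeleteFunc: func(obj interface{}) {
            if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
                queue.Add(key)
            }
        },
    })

    stopCh := make(chan struct{})
    defer close(stopCh)
    factory.Start(stopCh)
    factory.WaitForCacheSync(stopCh)

    // A single worker drains the queue; real controllers run several in parallel.
    for {
        key, shutdown := queue.Get()
        if shutdown {
            return
        }
        // A real controller would reconcile the pod named by "namespace/name" here
        // and call queue.AddRateLimited(key) on failure.
        fmt.Println("sync", key)
        queue.Forget(key)
        queue.Done(key)
    }
}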
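Similarly, the "sync: limit concurrency in deleteDanglingPods" commit bounds how many pod deletions run at once, and the "never store context" commit passes a context into every call instead of keeping one on a struct. A common way to combine the two is a counting semaphore built on a buffered channel, as in the sketch below; deletePod, the pod names, and the limit of 2 are hypothetical stand-ins, not the PR's code.

package main

import (
    "context"
    "fmt"
    "sync"
)

// deleteDanglingPods deletes the given pods with at most maxConcurrency
// deletions in flight at any time. The context is passed in, never stored.
func deleteDanglingPods(ctx context.Context, pods []string, maxConcurrency int) {
    sem := make(chan struct{}, maxConcurrency) // counting semaphore
    var wg sync.WaitGroup

    for _, name := range pods {
        select {
        case sem <- struct{}{}: // acquire a slot
        case <-ctx.Done(): // stop scheduling new deletions when cancelled
            wg.Wait()
            return
        }

        wg.Add(1)
        go func(name string) {
            defer wg.Done()
            defer func() { <-sem }() // release the slot
            deletePod(ctx, name)     // hypothetical provider/API-server call
        }(name)
    }
    wg.Wait()
}

func deletePod(ctx context.Context, name string) {
    // Placeholder for the real deletion logic.
    fmt.Println("deleting", name)
}

func main() {
    deleteDanglingPods(context.Background(), []string{"a", "b", "c", "d"}, 2)
}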
Committed by: Robbie Zhang
Parent: 0e9cfca585
Commit: 28a757f4da
@@ -2,321 +2,50 @@ package manager
 import (
-    "sync"
-    "time"
-
-    "github.com/pkg/errors"
     "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/client-go/kubernetes"
+    "k8s.io/apimachinery/pkg/labels"
+    corev1listers "k8s.io/client-go/listers/core/v1"
+
+    "github.com/virtual-kubelet/virtual-kubelet/log"
 )
-// ResourceManager works a cache for pods assigned to this virtual node within Kubernetes.
-// New ResourceManagers should be created with the NewResourceManager() function.
+// ResourceManager acts as a passthrough to a cache (lister) for pods assigned to the current node.
+// It is also a passthrough to a cache (lister) for Kubernetes secrets and config maps.
 type ResourceManager struct {
-    sync.RWMutex
-    k8sClient kubernetes.Interface
-
-    pods         map[string]*v1.Pod
-    deletingPods map[string]*v1.Pod
-    configMapRef map[string]int64
-    configMaps   map[string]*v1.ConfigMap
-    secretRef    map[string]int64
-    secrets      map[string]*v1.Secret
+    podLister       corev1listers.PodLister
+    secretLister    corev1listers.SecretLister
+    configMapLister corev1listers.ConfigMapLister
 }
 // NewResourceManager returns a ResourceManager with the internal maps initialized.
-func NewResourceManager(k8sClient kubernetes.Interface) (*ResourceManager, error) {
+func NewResourceManager(podLister corev1listers.PodLister, secretLister corev1listers.SecretLister, configMapLister corev1listers.ConfigMapLister) (*ResourceManager, error) {
     rm := ResourceManager{
-        pods:         make(map[string]*v1.Pod, 0),
-        deletingPods: make(map[string]*v1.Pod, 0),
-        configMapRef: make(map[string]int64, 0),
-        secretRef:    make(map[string]int64, 0),
-        configMaps:   make(map[string]*v1.ConfigMap, 0),
-        secrets:      make(map[string]*v1.Secret, 0),
-        k8sClient:    k8sClient,
+        podLister:       podLister,
+        secretLister:    secretLister,
+        configMapLister: configMapLister,
     }
-
-    configW, err := rm.k8sClient.CoreV1().ConfigMaps(v1.NamespaceAll).Watch(metav1.ListOptions{})
-    if err != nil {
-        return nil, errors.Wrap(err, "error getting config watch")
-    }
-
-    secretsW, err := rm.k8sClient.CoreV1().Secrets(v1.NamespaceAll).Watch(metav1.ListOptions{})
-    if err != nil {
-        return nil, errors.Wrap(err, "error getting secrets watch")
-    }
-
-    go rm.watchConfigMaps(configW)
-    go rm.watchSecrets(secretsW)
-
-    tick := time.Tick(5 * time.Minute)
-    go func() {
-        for range tick {
-            rm.Lock()
-            for n, c := range rm.secretRef {
-                if c <= 0 {
-                    delete(rm.secretRef, n)
-                }
-            }
-            for n := range rm.secrets {
-                if _, ok := rm.secretRef[n]; !ok {
-                    delete(rm.secrets, n)
-                }
-            }
-            for n, c := range rm.configMapRef {
-                if c <= 0 {
-                    delete(rm.configMapRef, n)
-                }
-            }
-            for n := range rm.configMaps {
-                if _, ok := rm.configMapRef[n]; !ok {
-                    delete(rm.configMaps, n)
-                }
-            }
-            rm.Unlock()
-        }
-    }()
-
     return &rm, nil
 }
-// SetPods clears the internal cache and populates it with the supplied pods.
-func (rm *ResourceManager) SetPods(pods *v1.PodList) {
-    rm.Lock()
-    defer rm.Unlock()
-
-    for k, p := range pods.Items {
-        podKey := rm.getStoreKey(p.Namespace, p.Name)
-        if p.DeletionTimestamp != nil {
-            rm.deletingPods[podKey] = &pods.Items[k]
-        } else {
-            rm.pods[podKey] = &pods.Items[k]
-            rm.incrementRefCounters(&p)
-        }
-    }
-}
-
-// UpdatePod updates the supplied pod in the cache.
-func (rm *ResourceManager) UpdatePod(p *v1.Pod) bool {
-    rm.Lock()
-    defer rm.Unlock()
-
-    podKey := rm.getStoreKey(p.Namespace, p.Name)
-    if p.DeletionTimestamp != nil {
-        if old, ok := rm.pods[podKey]; ok {
-            rm.deletingPods[podKey] = p
-
-            rm.decrementRefCounters(old)
-            delete(rm.pods, podKey)
-
-            return true
-        }
-
-        if _, ok := rm.deletingPods[podKey]; !ok {
-            return true
-        }
-
-        return false
-    }
-
-    if old, ok := rm.pods[podKey]; ok {
-        rm.decrementRefCounters(old)
-        rm.pods[podKey] = p
-        rm.incrementRefCounters(p)
-
-        // NOTE(junjiez): no reconcile as we don't support update pod.
-        return false
-    }
-
-    rm.pods[podKey] = p
-    rm.incrementRefCounters(p)
-
-    return true
-}
-
-// DeletePod removes the pod from the cache.
-func (rm *ResourceManager) DeletePod(p *v1.Pod) bool {
-    rm.Lock()
-    defer rm.Unlock()
-
-    podKey := rm.getStoreKey(p.Namespace, p.Name)
-    if old, ok := rm.pods[podKey]; ok {
-        rm.decrementRefCounters(old)
-        delete(rm.pods, podKey)
-        return true
-    }
-
-    if _, ok := rm.deletingPods[podKey]; ok {
-        delete(rm.deletingPods, podKey)
-    }
-
-    return false
-}
-
-// GetPod retrieves the specified pod from the cache. It returns nil if a pod is not found.
-func (rm *ResourceManager) GetPod(namespace, name string) *v1.Pod {
-    rm.RLock()
-    defer rm.RUnlock()
-
-    if p, ok := rm.pods[rm.getStoreKey(namespace, name)]; ok {
-        return p
-    }
-
-    return nil
-}
 
 // GetPods returns a list of all known pods assigned to this virtual node.
 func (rm *ResourceManager) GetPods() []*v1.Pod {
-    rm.RLock()
-    defer rm.RUnlock()
-
-    pods := make([]*v1.Pod, 0, len(rm.pods)+len(rm.deletingPods))
-    for _, p := range rm.pods {
-        pods = append(pods, p)
-    }
-    for _, p := range rm.deletingPods {
-        pods = append(pods, p)
-    }
-
-    return pods
+    l, err := rm.podLister.List(labels.Everything())
+    if err == nil {
+        return l
+    }
+
+    log.L.Errorf("failed to fetch pods from lister: %v", err)
+    return make([]*v1.Pod, 0)
 }
-// GetConfigMap returns the specified ConfigMap from Kubernetes. It retrieves it from cache if there is one.
+// GetConfigMap retrieves the specified config map from the cache.
 func (rm *ResourceManager) GetConfigMap(name, namespace string) (*v1.ConfigMap, error) {
-    rm.Lock()
-    defer rm.Unlock()
-
-    configMapKey := rm.getStoreKey(namespace, name)
-    if cm, ok := rm.configMaps[configMapKey]; ok {
-        return cm, nil
-    }
-
-    var opts metav1.GetOptions
-    cm, err := rm.k8sClient.CoreV1().ConfigMaps(namespace).Get(name, opts)
-    if err != nil {
-        return nil, err
-    }
-    rm.configMaps[configMapKey] = cm
-
-    return cm, err
+    return rm.configMapLister.ConfigMaps(namespace).Get(name)
 }
-// GetSecret returns the specified Secret from Kubernetes. It retrieves it from cache if there is one.
+// GetSecret retrieves the specified secret from Kubernetes.
 func (rm *ResourceManager) GetSecret(name, namespace string) (*v1.Secret, error) {
-    rm.Lock()
-    defer rm.Unlock()
-
-    secretkey := rm.getStoreKey(namespace, name)
-    if secret, ok := rm.secrets[secretkey]; ok {
-        return secret, nil
-    }
-
-    var opts metav1.GetOptions
-    secret, err := rm.k8sClient.CoreV1().Secrets(namespace).Get(name, opts)
-    if err != nil {
-        return nil, err
-    }
-    rm.secrets[secretkey] = secret
-
-    return secret, err
-}
-
-// watchConfigMaps monitors the Kubernetes API for modifications and deletions of config maps
-// and evicts them from the internal cache.
-func (rm *ResourceManager) watchConfigMaps(w watch.Interface) {
-    for {
-        select {
-        case ev, ok := <-w.ResultChan():
-            if !ok {
-                return
-            }
-
-            rm.Lock()
-            configMapkey := rm.getStoreKey(ev.Object.(*v1.ConfigMap).Namespace, ev.Object.(*v1.ConfigMap).Name)
-            switch ev.Type {
-            case watch.Modified:
-                delete(rm.configMaps, configMapkey)
-            case watch.Deleted:
-                delete(rm.configMaps, configMapkey)
-            }
-            rm.Unlock()
-        }
-    }
-}
-
-// watchSecrets monitors the Kubernetes API for modifications and deletions of secrets
-// and evicts them from the internal cache.
-func (rm *ResourceManager) watchSecrets(w watch.Interface) {
-    for {
-        select {
-        case ev, ok := <-w.ResultChan():
-            if !ok {
-                return
-            }
-
-            rm.Lock()
-            secretKey := rm.getStoreKey(ev.Object.(*v1.Secret).Namespace, ev.Object.(*v1.Secret).Name)
-            switch ev.Type {
-            case watch.Modified:
-                delete(rm.secrets, secretKey)
-            case watch.Deleted:
-                delete(rm.secrets, secretKey)
-            }
-            rm.Unlock()
-        }
-    }
-}
-
-func (rm *ResourceManager) incrementRefCounters(p *v1.Pod) {
-    for _, c := range p.Spec.Containers {
-        for _, e := range c.Env {
-            if e.ValueFrom != nil && e.ValueFrom.ConfigMapKeyRef != nil {
-                configMapKey := rm.getStoreKey(p.Namespace, e.ValueFrom.ConfigMapKeyRef.Name)
-                rm.configMapRef[configMapKey]++
-            }
-
-            if e.ValueFrom != nil && e.ValueFrom.SecretKeyRef != nil {
-                secretKey := rm.getStoreKey(p.Namespace, e.ValueFrom.SecretKeyRef.Name)
-                rm.secretRef[secretKey]++
-            }
-        }
-    }
-
-    for _, v := range p.Spec.Volumes {
-        if v.VolumeSource.Secret != nil {
-            secretKey := rm.getStoreKey(p.Namespace, v.VolumeSource.Secret.SecretName)
-            rm.secretRef[secretKey]++
-        }
-    }
-}
-
-func (rm *ResourceManager) decrementRefCounters(p *v1.Pod) {
-    for _, c := range p.Spec.Containers {
-        for _, e := range c.Env {
-            if e.ValueFrom != nil && e.ValueFrom.ConfigMapKeyRef != nil {
-                configMapKey := rm.getStoreKey(p.Namespace, e.ValueFrom.ConfigMapKeyRef.Name)
-                rm.configMapRef[configMapKey]--
-            }
-
-            if e.ValueFrom != nil && e.ValueFrom.SecretKeyRef != nil {
-                secretKey := rm.getStoreKey(p.Namespace, e.ValueFrom.SecretKeyRef.Name)
-                rm.secretRef[secretKey]--
-            }
-        }
-    }
-
-    for _, v := range p.Spec.Volumes {
-        if v.VolumeSource.Secret != nil {
-            secretKey := rm.getStoreKey(p.Namespace, v.VolumeSource.Secret.SecretName)
-            rm.secretRef[secretKey]--
-        }
-    }
-}
-
-// getStoreKey returns the key with namespace for storing objects from different namespaces.
-func (rm *ResourceManager) getStoreKey(namespace, name string) string {
-    return namespace + "_" + name
+    return rm.secretLister.Secrets(namespace).Get(name)
 }
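
With this change NewResourceManager takes listers instead of a Kubernetes client. The sketch below shows how a caller could obtain those listers from a shared informer factory and build the manager. It is illustrative only: the kubeconfig path, the one-minute resync period, and the manager import path are assumptions rather than code from this commit.

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"

    "github.com/virtual-kubelet/virtual-kubelet/manager"
)

func main() {
    // Assumption: a kubeconfig on disk; the real command wires this from flags.
    cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // The shared informer factory provides the pod, secret and config map listers.
    factory := informers.NewSharedInformerFactory(client, 1*time.Minute)
    podInformer := factory.Core().V1().Pods()
    secretInformer := factory.Core().V1().Secrets()
    configMapInformer := factory.Core().V1().ConfigMaps()

    stopCh := make(chan struct{})
    defer close(stopCh)
    factory.Start(stopCh)
    cache.WaitForCacheSync(stopCh,
        podInformer.Informer().HasSynced,
        secretInformer.Informer().HasSynced,
        configMapInformer.Informer().HasSynced)

    rm, err := manager.NewResourceManager(
        podInformer.Lister(),
        secretInformer.Lister(),
        configMapInformer.Lister())
    if err != nil {
        panic(err)
    }

    // From here the controller reads exclusively from the informer caches.
    fmt.Println("pods in cache:", len(rm.GetPods()))
}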