Add the ability to dictate custom retries

Our current retry policy is naive: it simply gives up after 20 retries, and its backoff is driven entirely by the rate limiter. If the user configures aggressive rate limiting but the API server has a temporary outage, they may want to keep delaying and retrying. In fact, K8s has a built-in function to suggest delays: https://pkg.go.dev/k8s.io/apimachinery/pkg/api/errors#SuggestsClientDelay

Signed-off-by: Sargun Dhillon <sargun@sargun.me>
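For illustration, a custom retry policy built on that function might look like the sketch below. This is only a sketch: the canonical ShouldRetryFunc type lives in the queue package, and the signature and return-value semantics assumed here (a non-nil *time.Duration overrides the rate limiter's backoff, a non-nil error stops retrying) are inferred from this diff rather than quoted from it.

// Sketch: a retry policy that honors the API server's suggested delay.
package retrypolicy

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// ShouldRetryFunc decides whether, and after how long, to retry a failed key.
// Assumed semantics: a non-nil *time.Duration overrides the rate limiter's
// backoff; a non-nil error gives up and surfaces that error.
type ShouldRetryFunc func(ctx context.Context, key string, timesTried int, originallyAdded time.Time, err error) (*time.Duration, error)

// SuggestedDelayRetry gives up after maxRetries attempts, but when the API
// server suggests a client delay (e.g. a Retry-After header on a 429), it
// waits at least that long instead of the rate limiter's default backoff.
func SuggestedDelayRetry(maxRetries int) ShouldRetryFunc {
	return func(ctx context.Context, key string, timesTried int, _ time.Time, err error) (*time.Duration, error) {
		if timesTried >= maxRetries {
			return nil, err // exhausted: stop retrying and report the last error
		}
		if seconds, ok := apierrors.SuggestsClientDelay(err); ok {
			delay := time.Duration(seconds) * time.Second
			return &delay, nil // honor the server-suggested delay
		}
		return nil, nil // no suggestion: defer to the rate limiter's backoff
	}
}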
@@ -20,12 +20,11 @@ import (
 	"sync"
 	"time"
 
-	"github.com/virtual-kubelet/virtual-kubelet/internal/queue"
-
 	"github.com/google/go-cmp/cmp"
 	pkgerrors "github.com/pkg/errors"
 	"github.com/virtual-kubelet/virtual-kubelet/errdefs"
 	"github.com/virtual-kubelet/virtual-kubelet/internal/manager"
+	"github.com/virtual-kubelet/virtual-kubelet/internal/queue"
 	"github.com/virtual-kubelet/virtual-kubelet/log"
 	"github.com/virtual-kubelet/virtual-kubelet/trace"
 	corev1 "k8s.io/api/core/v1"
@@ -178,10 +177,18 @@ type PodControllerConfig struct {
 	// SyncPodsFromKubernetesRateLimiter defines the rate limit for the SyncPodsFromKubernetes queue
 	SyncPodsFromKubernetesRateLimiter workqueue.RateLimiter
+	// SyncPodsFromKubernetesShouldRetryFunc allows for a custom retry policy for the SyncPodsFromKubernetes queue
+	SyncPodsFromKubernetesShouldRetryFunc ShouldRetryFunc
 
 	// DeletePodsFromKubernetesRateLimiter defines the rate limit for the DeletePodsFromKubernetes queue
 	DeletePodsFromKubernetesRateLimiter workqueue.RateLimiter
+	// DeletePodsFromKubernetesShouldRetryFunc allows for a custom retry policy for the DeletePodsFromKubernetes queue
+	DeletePodsFromKubernetesShouldRetryFunc ShouldRetryFunc
 
 	// SyncPodStatusFromProviderRateLimiter defines the rate limit for the SyncPodStatusFromProvider queue
 	SyncPodStatusFromProviderRateLimiter workqueue.RateLimiter
+	// SyncPodStatusFromProviderShouldRetryFunc allows for a custom retry policy for the SyncPodStatusFromProvider queue
+	SyncPodStatusFromProviderShouldRetryFunc ShouldRetryFunc
 
 	// Add custom filtering for pod informer event handlers
 	// Use this for cases where the pod informer handles more than pods assigned to this node
@@ -240,9 +247,9 @@ func NewPodController(cfg PodControllerConfig) (*PodController, error) {
 		podEventFilterFunc: cfg.PodEventFilterFunc,
 	}
 
-	pc.syncPodsFromKubernetes = queue.New(cfg.SyncPodsFromKubernetesRateLimiter, "syncPodsFromKubernetes", pc.syncPodFromKubernetesHandler)
-	pc.deletePodsFromKubernetes = queue.New(cfg.DeletePodsFromKubernetesRateLimiter, "deletePodsFromKubernetes", pc.deletePodsFromKubernetesHandler)
-	pc.syncPodStatusFromProvider = queue.New(cfg.SyncPodStatusFromProviderRateLimiter, "syncPodStatusFromProvider", pc.syncPodStatusFromProviderHandler)
+	pc.syncPodsFromKubernetes = queue.New(cfg.SyncPodsFromKubernetesRateLimiter, "syncPodsFromKubernetes", pc.syncPodFromKubernetesHandler, cfg.SyncPodsFromKubernetesShouldRetryFunc)
+	pc.deletePodsFromKubernetes = queue.New(cfg.DeletePodsFromKubernetesRateLimiter, "deletePodsFromKubernetes", pc.deletePodsFromKubernetesHandler, cfg.DeletePodsFromKubernetesShouldRetryFunc)
+	pc.syncPodStatusFromProvider = queue.New(cfg.SyncPodStatusFromProviderRateLimiter, "syncPodStatusFromProvider", pc.syncPodStatusFromProviderHandler, cfg.SyncPodStatusFromProviderShouldRetryFunc)
 
 	return pc, nil
 }
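From a provider's side, the new fields are set alongside the existing rate limiters. The sketch below is hypothetical wiring, not code from this change: the SuggestedDelayRetry helper comes from the sketch above, the rate limiters are just client-go defaults, and the remaining required PodControllerConfig fields are omitted.

// Hypothetical wiring of a custom retry policy into the pod controller.
package provider

import (
	"k8s.io/client-go/util/workqueue"

	"github.com/virtual-kubelet/virtual-kubelet/node"
)

// NewController applies the same retry policy to all three pod queues.
func NewController() (*node.PodController, error) {
	// Convert the sketch's helper to the node package's exported type; the
	// conversion assumes the two signatures are identical.
	retry := node.ShouldRetryFunc(SuggestedDelayRetry(20))

	cfg := node.PodControllerConfig{
		// ... PodClient, PodInformer, Provider, etc. omitted for brevity ...

		SyncPodsFromKubernetesRateLimiter:        workqueue.DefaultControllerRateLimiter(),
		SyncPodsFromKubernetesShouldRetryFunc:    retry,
		DeletePodsFromKubernetesRateLimiter:      workqueue.DefaultControllerRateLimiter(),
		DeletePodsFromKubernetesShouldRetryFunc:  retry,
		SyncPodStatusFromProviderRateLimiter:     workqueue.DefaultControllerRateLimiter(),
		SyncPodStatusFromProviderShouldRetryFunc: retry,
	}
	return node.NewPodController(cfg)
}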