Expose the queue rate limiter
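This change removes the workqueue-retry-qps flag, the WorkQueueRetryQPS option, and the internal controllerRateLimiter helper, and instead lets callers inject the work queue rate limiter directly: PodControllerConfig gains a RateLimiter field of type workqueue.RateLimiter (falling back to workqueue.DefaultControllerRateLimiter() when nil), the pod status queue becomes the podStatusQ field on PodController, and all three queues (k8sQ, deletionQ, podStatusQ) are built from the configured limiter. The sketch below shows how a caller might use the new field; it is illustrative only, and the node import path plus the helper name are assumptions, not part of this diff:

    package example

    import (
        "k8s.io/client-go/util/workqueue"

        // import path assumed from the repository layout, not shown in this diff
        "github.com/virtual-kubelet/virtual-kubelet/node"
    )

    // withCustomRateLimiter fills in only the new knob; every other
    // PodControllerConfig field (PodClient, PodInformer, EventRecorder,
    // Provider, the ConfigMap/Secret/Service informers) still has to be set
    // by the caller exactly as before.
    func withCustomRateLimiter(limiter workqueue.RateLimiter) node.PodControllerConfig {
        return node.PodControllerConfig{
            // ... required clients, informers and provider go here ...
            RateLimiter: limiter, // nil falls back to workqueue.DefaultControllerRateLimiter()
        }
    }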
@@ -87,9 +87,6 @@ func installFlags(flags *pflag.FlagSet, c *Opts) {
     flags.DurationVar(&c.StreamCreationTimeout, "stream-creation-timeout", c.StreamCreationTimeout,
         "stream-creation-timeout is the maximum time for streaming connection, default 30s.")

-    flags.IntVar(&c.WorkQueueRetryQPS, "workqueue-retry-qps", c.WorkQueueRetryQPS,
-        "workqueue-retry-qps is the rate limit when objects in a workqueue, default 10.")
-
     flagset := flag.NewFlagSet("klog", flag.PanicOnError)
     klog.InitFlags(flagset)
     flagset.VisitAll(func(f *flag.Flag) {
@@ -40,7 +40,6 @@ const (
     DefaultTaintKey = "virtual-kubelet.io/provider"
     DefaultStreamIdleTimeout = 30 * time.Second
     DefaultStreamCreationTimeout = 30 * time.Second
-    DefaultWorkQueueRetryQPS = 10
 )

 // Opts stores all the options for configuring the root virtual-kubelet command.
@@ -93,9 +92,6 @@ type Opts struct {
     // StreamCreationTimeout is the maximum time for streaming connection
     StreamCreationTimeout time.Duration

-    // WorkQueueRetryQPS is the default qps limit when retry for k8s workqueue
-    WorkQueueRetryQPS int
-
     Version string
 }

@@ -171,9 +167,5 @@ func SetDefaultOpts(c *Opts) error {
         c.StreamCreationTimeout = DefaultStreamCreationTimeout
     }

-    if c.WorkQueueRetryQPS == 0 {
-        c.WorkQueueRetryQPS = DefaultWorkQueueRetryQPS
-    }
-
     return nil
 }
@@ -185,7 +185,6 @@ func runRootCommand(ctx context.Context, s *provider.Store, c Opts) error {
         SecretInformer: secretInformer,
         ConfigMapInformer: configMapInformer,
         ServiceInformer: serviceInformer,
-        WorkQueueRetryQPS: c.WorkQueueRetryQPS,
     })
     if err != nil {
         return errors.Wrap(err, "error setting up pod controller")
go.mod
@@ -21,7 +21,6 @@ require (
     github.com/spf13/pflag v1.0.5
     go.opencensus.io v0.21.0
     golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456
-    golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
     gotest.tools v2.2.0+incompatible
     k8s.io/api v0.0.0
     k8s.io/apimachinery v0.0.0
@@ -289,7 +289,6 @@ func wireUpSystem(ctx context.Context, provider PodLifecycleHandler, f testFunction
             ConfigMapInformer: configMapInformer,
             SecretInformer: secretInformer,
             ServiceInformer: serviceInformer,
-            WorkQueueRetryQPS: 10,
         },
     }

@@ -49,6 +49,7 @@ func newTestController() *TestController {
         recorder: testutil.FakeEventRecorder(5),
         k8sQ: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
         deletionQ: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
+        podStatusQ: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
         done: make(chan struct{}),
         ready: make(chan struct{}),
         podsInformer: iFactory.Core().V1().Pods(),
@@ -27,7 +27,6 @@ import (
     "github.com/virtual-kubelet/virtual-kubelet/internal/manager"
     "github.com/virtual-kubelet/virtual-kubelet/log"
     "github.com/virtual-kubelet/virtual-kubelet/trace"
-    "golang.org/x/time/rate"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
     corev1informers "k8s.io/client-go/informers/core/v1"
@@ -109,6 +108,8 @@ type PodController struct {
     // deletionQ is a queue on which pods are reconciled, and we check if pods are in API server after grace period
     deletionQ workqueue.RateLimitingInterface

+    podStatusQ workqueue.RateLimitingInterface
+
     // From the time of creation, to termination the knownPods map will contain the pods key
     // (derived from Kubernetes' cache library) -> a *knownPod struct.
     knownPods sync.Map
@@ -128,8 +129,6 @@ type PodController struct {
     // This is used since `pc.Run()` is typically called in a goroutine and managing
     // this can be non-trivial for callers.
     err error
-    // qps is the default qps limit when retry
-    qps int
 }

 type knownPod struct {
@@ -162,25 +161,12 @@ type PodControllerConfig struct {
     SecretInformer corev1informers.SecretInformer
     ServiceInformer corev1informers.ServiceInformer

-    // WorkQueueRetryQPS is the default qps limit when retry
-    WorkQueueRetryQPS int
-}
-
-// controllerRateLimiter has
-// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
-func controllerRateLimiter(qps int) workqueue.RateLimiter {
-    return workqueue.NewMaxOfRateLimiter(
-        workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
-        // This is only for retry speed and its only the overall factor (not per item)
-        &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), qps*10)},
-    )
+    // RateLimiter defines the rate limit of work queue
+    RateLimiter workqueue.RateLimiter
 }

 // NewPodController creates a new pod controller with the provided config.
 func NewPodController(cfg PodControllerConfig) (*PodController, error) {
-    if cfg.WorkQueueRetryQPS == 0 {
-        cfg.WorkQueueRetryQPS = 10
-    }
     if cfg.PodClient == nil {
         return nil, errdefs.InvalidInput("missing core client")
     }
@@ -202,7 +188,9 @@ func NewPodController(cfg PodControllerConfig) (*PodController, error) {
     if cfg.Provider == nil {
         return nil, errdefs.InvalidInput("missing provider")
     }
-
+    if cfg.RateLimiter == nil {
+        cfg.RateLimiter = workqueue.DefaultControllerRateLimiter()
+    }
     rm, err := manager.NewResourceManager(cfg.PodInformer.Lister(), cfg.SecretInformer.Lister(), cfg.ConfigMapInformer.Lister(), cfg.ServiceInformer.Lister())
     if err != nil {
         return nil, pkgerrors.Wrap(err, "could not create resource manager")
@@ -217,9 +205,9 @@ func NewPodController(cfg PodControllerConfig) (*PodController, error) {
         ready: make(chan struct{}),
         done: make(chan struct{}),
         recorder: cfg.EventRecorder,
-        k8sQ: workqueue.NewNamedRateLimitingQueue(controllerRateLimiter(cfg.WorkQueueRetryQPS), "syncPodsFromKubernetes"),
-        deletionQ: workqueue.NewNamedRateLimitingQueue(controllerRateLimiter(cfg.WorkQueueRetryQPS), "deletePodsFromKubernetes"),
-        qps: cfg.WorkQueueRetryQPS,
+        k8sQ: workqueue.NewNamedRateLimitingQueue(cfg.RateLimiter, "syncPodsFromKubernetes"),
+        deletionQ: workqueue.NewNamedRateLimitingQueue(cfg.RateLimiter, "deletePodsFromKubernetes"),
+        podStatusQ: workqueue.NewNamedRateLimitingQueue(cfg.RateLimiter, "syncPodStatusFromProvider"),
     }

     return pc, nil
@@ -262,13 +250,12 @@ func (pc *PodController) Run(ctx context.Context, podSyncWorkers int) (retErr er
     }
     pc.provider = provider

-    podStatusQueue := workqueue.NewNamedRateLimitingQueue(controllerRateLimiter(pc.qps), "syncPodStatusFromProvider")
     provider.NotifyPods(ctx, func(pod *corev1.Pod) {
-        pc.enqueuePodStatusUpdate(ctx, podStatusQueue, pod.DeepCopy())
+        pc.enqueuePodStatusUpdate(ctx, pc.podStatusQ, pod.DeepCopy())
     })
     go runProvider(ctx)

-    defer podStatusQueue.ShutDown()
+    defer pc.podStatusQ.ShutDown()

     // Wait for the caches to be synced *before* starting to do work.
     if ok := cache.WaitForCacheSync(ctx.Done(), pc.podsInformer.Informer().HasSynced); !ok {
@@ -328,7 +315,7 @@ func (pc *PodController) Run(ctx context.Context, podSyncWorkers int) (retErr er
         workerID := strconv.Itoa(id)
         go func() {
             defer wg.Done()
-            pc.runSyncPodStatusFromProviderWorker(ctx, workerID, podStatusQueue)
+            pc.runSyncPodStatusFromProviderWorker(ctx, workerID, pc.podStatusQ)
         }()
     }

@@ -356,7 +343,7 @@ func (pc *PodController) Run(ctx context.Context, podSyncWorkers int) (retErr er
     <-ctx.Done()
     log.G(ctx).Info("shutting down workers")
     pc.k8sQ.ShutDown()
-    podStatusQueue.ShutDown()
+    pc.podStatusQ.ShutDown()
     pc.deletionQ.ShutDown()

     wg.Wait()
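Note for callers that previously tuned retries through workqueue-retry-qps: the removed controllerRateLimiter can be rebuilt from the same workqueue and rate primitives and handed to PodControllerConfig.RateLimiter. A minimal sketch, with an illustrative helper name; doing so pulls the golang.org/x/time dependency into the caller's module, since this commit drops it from virtual-kubelet's go.mod:

    package example

    import (
        "time"

        "golang.org/x/time/rate"
        "k8s.io/client-go/util/workqueue"
    )

    // retryRateLimiter mirrors the removed controllerRateLimiter: per-item
    // exponential backoff (5ms base, 1000s cap) combined with an overall
    // token bucket of qps tokens per second and a burst of qps*10.
    func retryRateLimiter(qps int) workqueue.RateLimiter {
        return workqueue.NewMaxOfRateLimiter(
            workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
            &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(qps), qps*10)},
        )
    }

    // Usage: set cfg.RateLimiter = retryRateLimiter(10) before calling NewPodController
    // to keep the previous default of 10 qps.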