Remove Server object (#629)

The Server object had some awkward shared responsibility with the PodController.
Instead, just move that functionality into the PodController.
Brian Goff
2019-06-01 09:36:38 -07:00
committed by GitHub
parent 21d8ba2206
commit 71546a908f
6 changed files with 226 additions and 267 deletions

@@ -2,11 +2,14 @@ package vkubelet

import (
	"context"
	"strconv"
	"time"

	"github.com/cpuguy83/strongerrors/status/ocstatus"
	pkgerrors "github.com/pkg/errors"
	"github.com/virtual-kubelet/virtual-kubelet/log"
	"github.com/virtual-kubelet/virtual-kubelet/trace"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/util/workqueue"
)
@@ -26,7 +29,10 @@ func handleQueueItem(ctx context.Context, q workqueue.RateLimitingInterface, han
		return false
	}

	log.G(ctx).Debug("Got queue object")

	err := func(obj interface{}) error {
		defer log.G(ctx).Debug("Processed queue item")
		// We call Done here so the work queue knows we have finished processing this item.
		// We also must remember to call Forget if we do not want this work item being re-queued.
		// For example, we do not call Forget if a transient error occurs.
@@ -43,6 +49,7 @@ func handleQueueItem(ctx context.Context, q workqueue.RateLimitingInterface, han
			log.G(ctx).Warnf("expected string in work queue item but got %#v", obj)
			return nil
		}

		// Add the current key as an attribute to the current span.
		ctx = span.WithField(ctx, "key", key)

		// Run the syncHandler, passing it the namespace/name string of the Pod resource to be synced.
@@ -71,3 +78,64 @@ func handleQueueItem(ctx context.Context, q workqueue.RateLimitingInterface, han
	return true
}

func (pc *PodController) runProviderSyncWorkers(ctx context.Context, q workqueue.RateLimitingInterface, numWorkers int) {
	for i := 0; i < numWorkers; i++ {
		go func(index int) {
			workerID := strconv.Itoa(index)
			pc.runProviderSyncWorker(ctx, workerID, q)
		}(i)
	}
}

func (pc *PodController) runProviderSyncWorker(ctx context.Context, workerID string, q workqueue.RateLimitingInterface) {
	for pc.processPodStatusUpdate(ctx, workerID, q) {
	}
}

func (pc *PodController) processPodStatusUpdate(ctx context.Context, workerID string, q workqueue.RateLimitingInterface) bool {
	ctx, span := trace.StartSpan(ctx, "processPodStatusUpdate")
	defer span.End()

	// Add the ID of the current worker as an attribute to the current span.
	ctx = span.WithField(ctx, "workerID", workerID)

	return handleQueueItem(ctx, q, pc.podStatusHandler)
}
// providerSyncLoop synchronizes pod states from the provider back to Kubernetes.
//
// Deprecated: This is only used when the provider does not support async updates.
// Providers should implement async update support, even if it just means copying
// something like this in.
func (pc *PodController) providerSyncLoop(ctx context.Context, q workqueue.RateLimitingInterface) {
	const sleepTime = 5 * time.Second

	t := time.NewTimer(sleepTime)
	defer t.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			t.Stop()

			ctx, span := trace.StartSpan(ctx, "syncActualState")
			pc.updatePodStatuses(ctx, q)
			span.End()

			// restart the timer
			t.Reset(sleepTime)
		}
	}
}
func (pc *PodController) runSyncFromProvider(ctx context.Context, q workqueue.RateLimitingInterface) {
	if pn, ok := pc.provider.(PodNotifier); ok {
		pn.NotifyPods(ctx, func(pod *corev1.Pod) {
			enqueuePodStatusUpdate(ctx, q, pod)
		})
	} else {
		go pc.providerSyncLoop(ctx, q)
	}
}
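
For context, a provider opts in to the async path by satisfying the PodNotifier
interface that runSyncFromProvider type-asserts against. Below is a minimal
sketch: the NotifyPods signature matches the callback used above, but the
mockProvider type and its setPodStatus helper are hypothetical, not part of
this commit.

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
)

// mockProvider is a hypothetical provider used only to illustrate the
// PodNotifier contract.
type mockProvider struct {
	notify func(*corev1.Pod)
}

// NotifyPods satisfies PodNotifier: the PodController calls it once at
// startup, and the provider stores the callback for later use.
func (p *mockProvider) NotifyPods(_ context.Context, notify func(*corev1.Pod)) {
	p.notify = notify
}

// setPodStatus is where such a provider would observe a pod status change
// and push it to the controller, rather than waiting on the deprecated
// five-second providerSyncLoop polling.
func (p *mockProvider) setPodStatus(pod *corev1.Pod) {
	if p.notify != nil {
		p.notify(pod)
	}
}

With something like this in place, the type assertion in runSyncFromProvider
succeeds and the deprecated polling loop is never started.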