[VK] Use cache controller and create/delete pods concurrently (#373)

* Add k8s.io/client-go/tools/cache package

* Add cache controller

* Add pod creator and terminator

* Pod Synchronizer

* Clean up

* Add back reconcile

* Remove unnecessary space in log

* Incorporate feedback

* dep ensure

* Fix the syntax error

* Fix the merge errors

* Minor Refactor

* Set status

* Pass context together with the pod to the pod channel

* Change to use a flag to specify the number of pod sync workers

* Remove the unused const

* Use stable PROD region WestUS in tests

EastUS2EUAP is not reliable.
Author: Robbie Zhang (committed via GitHub)
Date:   2018-10-16 17:20:02 -07:00
Parent: b082eced13
Commit: 4a7b74ed42
40 changed files with 7173 additions and 89 deletions
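
The bullet list above describes the core of the change: pods, paired with their contexts, are put on a channel and drained by a configurable number of sync workers instead of being created and deleted serially. A minimal sketch of that pattern, using hypothetical names (podWork, syncPod, pod-sync-workers) that are not taken from the PR's code:

package main

import (
	"context"
	"flag"
	"fmt"
	"sync"
)

// podWork pairs a pod identifier with the context it should be processed under,
// mirroring the "pass context together with the pod to the pod channel" bullet.
type podWork struct {
	ctx     context.Context
	podName string
}

// podSyncWorkers mirrors the "flag to specify the number of pod sync workers" bullet.
var podSyncWorkers = flag.Int("pod-sync-workers", 10, "number of pod synchronization workers")

func main() {
	flag.Parse()

	work := make(chan podWork)
	var wg sync.WaitGroup

	// Start the requested number of workers; each one creates/deletes pods
	// independently, so slow operations do not serialize the whole sync loop.
	for i := 0; i < *podSyncWorkers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for w := range work {
				syncPod(w.ctx, id, w.podName)
			}
		}(i)
	}

	// Producer side: whatever enumerates pods just sends them on the channel.
	for _, name := range []string{"pod-a", "pod-b", "pod-c"} {
		work <- podWork{ctx: context.Background(), podName: name}
	}
	close(work)
	wg.Wait()
}

// syncPod stands in for the real create/update/delete logic.
func syncPod(ctx context.Context, worker int, name string) {
	select {
	case <-ctx.Done():
		return
	default:
	}
	fmt.Printf("worker %d synced %s\n", worker, name)
}

Because every worker pulls from the same channel, a slow create or delete against the provider blocks only one worker rather than the whole reconcile loop.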

vendor/k8s.io/apimachinery/pkg/util/cache/cache.go (generated, vendored, new file: 83 lines)

@@ -0,0 +1,83 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
)
const (
shardsCount int = 32
)
type Cache []*cacheShard
func NewCache(maxSize int) Cache {
if maxSize < shardsCount {
maxSize = shardsCount
}
cache := make(Cache, shardsCount)
for i := 0; i < shardsCount; i++ {
cache[i] = &cacheShard{
items: make(map[uint64]interface{}),
maxSize: maxSize / shardsCount,
}
}
return cache
}
func (c Cache) getShard(index uint64) *cacheShard {
return c[index%uint64(shardsCount)]
}
// Returns true if object already existed, false otherwise.
func (c *Cache) Add(index uint64, obj interface{}) bool {
return c.getShard(index).add(index, obj)
}
func (c *Cache) Get(index uint64) (obj interface{}, found bool) {
return c.getShard(index).get(index)
}
type cacheShard struct {
items map[uint64]interface{}
sync.RWMutex
maxSize int
}
// Returns true if object already existed, false otherwise.
func (s *cacheShard) add(index uint64, obj interface{}) bool {
s.Lock()
defer s.Unlock()
_, isOverwrite := s.items[index]
if !isOverwrite && len(s.items) >= s.maxSize {
var randomKey uint64
for randomKey = range s.items {
break
}
delete(s.items, randomKey)
}
s.items[index] = obj
return isOverwrite
}
func (s *cacheShard) get(index uint64) (obj interface{}, found bool) {
s.RLock()
defer s.RUnlock()
obj, found = s.items[index]
return
}
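
For reference, a short usage sketch of the sharded cache defined above. The import alias and the FNV key hashing are illustrative choices, not part of the vendored file; callers are expected to hash their own keys down to a uint64 index.

package main

import (
	"fmt"
	"hash/fnv"

	utilcache "k8s.io/apimachinery/pkg/util/cache"
)

// keyFor hashes an arbitrary string key to the uint64 index the cache expects.
func keyFor(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	c := utilcache.NewCache(1024) // capacity is split evenly across the 32 shards

	overwrote := c.Add(keyFor("default/nginx"), "pod status")
	fmt.Println("already existed:", overwrote) // false on the first insert

	if v, ok := c.Get(keyFor("default/nginx")); ok {
		fmt.Println("cached value:", v)
	}
}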

vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go (generated, vendored, new file: 102 lines)

@@ -0,0 +1,102 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"sync"
"time"
"github.com/hashicorp/golang-lru"
)
// Clock defines an interface for obtaining the current time
type Clock interface {
Now() time.Time
}
// realClock implements the Clock interface by calling time.Now()
type realClock struct{}
func (realClock) Now() time.Time { return time.Now() }
// LRUExpireCache is a cache that retains the most recently accessed keys, each with
// a TTL beyond which the key is forcibly expired.
type LRUExpireCache struct {
// clock is used to obtain the current time
clock Clock
cache *lru.Cache
lock sync.Mutex
}
// NewLRUExpireCache creates an expiring cache with the given size
func NewLRUExpireCache(maxSize int) *LRUExpireCache {
return NewLRUExpireCacheWithClock(maxSize, realClock{})
}
// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
cache, err := lru.New(maxSize)
if err != nil {
// if called with an invalid size
panic(err)
}
return &LRUExpireCache{clock: clock, cache: cache}
}
type cacheEntry struct {
value interface{}
expireTime time.Time
}
// Add adds the value to the cache at key with the specified maximum duration.
func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)})
}
// Get returns the value at the specified key from the cache if it exists and is not
// expired, or returns false.
func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) {
c.lock.Lock()
defer c.lock.Unlock()
e, ok := c.cache.Get(key)
if !ok {
return nil, false
}
if c.clock.Now().After(e.(*cacheEntry).expireTime) {
c.cache.Remove(key)
return nil, false
}
return e.(*cacheEntry).value, true
}
// Remove removes the specified key from the cache if it exists
func (c *LRUExpireCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
c.cache.Remove(key)
}
// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to
// get may return not found. It returns all keys from oldest to newest.
func (c *LRUExpireCache) Keys() []interface{} {
c.lock.Lock()
defer c.lock.Unlock()
return c.cache.Keys()
}
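
A short usage sketch of LRUExpireCache; the keys, values, and TTLs below are illustrative only.

package main

import (
	"fmt"
	"time"

	utilcache "k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	// Keep at most 128 entries; least recently used entries are evicted first.
	c := utilcache.NewLRUExpireCache(128)

	// Each entry carries its own TTL.
	c.Add("token", "abc123", 30*time.Second)

	if v, ok := c.Get("token"); ok {
		fmt.Println("still valid:", v)
	}

	// After the TTL passes, Get reports a miss and drops the entry.
	c.Add("short-lived", 42, time.Millisecond)
	time.Sleep(5 * time.Millisecond)
	if _, ok := c.Get("short-lived"); !ok {
		fmt.Println("expired entries are treated as missing")
	}
}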

vendor/k8s.io/apimachinery/pkg/util/diff/diff.go (generated, vendored, new file: 313 lines)

@@ -0,0 +1,313 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diff
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"text/tabwriter"
"github.com/davecgh/go-spew/spew"
"k8s.io/apimachinery/pkg/util/validation/field"
)
// StringDiff diffs a and b and returns a human readable diff.
func StringDiff(a, b string) string {
ba := []byte(a)
bb := []byte(b)
out := []byte{}
i := 0
for ; i < len(ba) && i < len(bb); i++ {
if ba[i] != bb[i] {
break
}
out = append(out, ba[i])
}
out = append(out, []byte("\n\nA: ")...)
out = append(out, ba[i:]...)
out = append(out, []byte("\n\nB: ")...)
out = append(out, bb[i:]...)
out = append(out, []byte("\n\n")...)
return string(out)
}
// ObjectDiff writes the two objects out as JSON and prints out the identical part of
// the objects followed by the remaining part of 'a' and finally the remaining part of 'b'.
// For debugging tests.
func ObjectDiff(a, b interface{}) string {
ab, err := json.Marshal(a)
if err != nil {
panic(fmt.Sprintf("a: %v", err))
}
bb, err := json.Marshal(b)
if err != nil {
panic(fmt.Sprintf("b: %v", err))
}
return StringDiff(string(ab), string(bb))
}
// ObjectGoPrintDiff is like ObjectDiff, but uses go-spew to print the objects,
// which shows absolutely everything by recursing into every single pointer
// (go's %#v formatters OTOH stop at a certain point). This is needed when you
// can't figure out why reflect.DeepEqual is returning false and nothing is
// showing you differences. This will.
func ObjectGoPrintDiff(a, b interface{}) string {
s := spew.ConfigState{DisableMethods: true}
return StringDiff(
s.Sprintf("%#v", a),
s.Sprintf("%#v", b),
)
}
func ObjectReflectDiff(a, b interface{}) string {
vA, vB := reflect.ValueOf(a), reflect.ValueOf(b)
if vA.Type() != vB.Type() {
return fmt.Sprintf("type A %T and type B %T do not match", a, b)
}
diffs := objectReflectDiff(field.NewPath("object"), vA, vB)
if len(diffs) == 0 {
return "<no diffs>"
}
out := []string{""}
for _, d := range diffs {
elidedA, elidedB := limit(d.a, d.b, 80)
out = append(out,
fmt.Sprintf("%s:", d.path),
fmt.Sprintf(" a: %s", elidedA),
fmt.Sprintf(" b: %s", elidedB),
)
}
return strings.Join(out, "\n")
}
// limit:
// 1. stringifies aObj and bObj
// 2. elides identical prefixes if either is too long
// 3. elides remaining content from the end if either is too long
func limit(aObj, bObj interface{}, max int) (string, string) {
elidedPrefix := ""
elidedASuffix := ""
elidedBSuffix := ""
a, b := fmt.Sprintf("%#v", aObj), fmt.Sprintf("%#v", bObj)
if aObj != nil && bObj != nil {
if aType, bType := fmt.Sprintf("%T", aObj), fmt.Sprintf("%T", bObj); aType != bType {
a = fmt.Sprintf("%s (%s)", a, aType)
b = fmt.Sprintf("%s (%s)", b, bType)
}
}
for {
switch {
case len(a) > max && len(a) > 4 && len(b) > 4 && a[:4] == b[:4]:
// a is too long, b has data, and the first several characters are the same
elidedPrefix = "..."
a = a[2:]
b = b[2:]
case len(b) > max && len(b) > 4 && len(a) > 4 && a[:4] == b[:4]:
// b is too long, a has data, and the first several characters are the same
elidedPrefix = "..."
a = a[2:]
b = b[2:]
case len(a) > max:
a = a[:max]
elidedASuffix = "..."
case len(b) > max:
b = b[:max]
elidedBSuffix = "..."
default:
// both are short enough
return elidedPrefix + a + elidedASuffix, elidedPrefix + b + elidedBSuffix
}
}
}
func public(s string) bool {
if len(s) == 0 {
return false
}
return s[:1] == strings.ToUpper(s[:1])
}
type diff struct {
path *field.Path
a, b interface{}
}
type orderedDiffs []diff
func (d orderedDiffs) Len() int { return len(d) }
func (d orderedDiffs) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d orderedDiffs) Less(i, j int) bool {
a, b := d[i].path.String(), d[j].path.String()
if a < b {
return true
}
return false
}
func objectReflectDiff(path *field.Path, a, b reflect.Value) []diff {
switch a.Type().Kind() {
case reflect.Struct:
var changes []diff
for i := 0; i < a.Type().NumField(); i++ {
if !public(a.Type().Field(i).Name) {
if reflect.DeepEqual(a.Interface(), b.Interface()) {
continue
}
return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}}
}
if sub := objectReflectDiff(path.Child(a.Type().Field(i).Name), a.Field(i), b.Field(i)); len(sub) > 0 {
changes = append(changes, sub...)
}
}
return changes
case reflect.Ptr, reflect.Interface:
if a.IsNil() || b.IsNil() {
switch {
case a.IsNil() && b.IsNil():
return nil
case a.IsNil():
return []diff{{path: path, a: nil, b: b.Interface()}}
default:
return []diff{{path: path, a: a.Interface(), b: nil}}
}
}
return objectReflectDiff(path, a.Elem(), b.Elem())
case reflect.Chan:
if !reflect.DeepEqual(a.Interface(), b.Interface()) {
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
return nil
case reflect.Slice:
lA, lB := a.Len(), b.Len()
l := lA
if lB < lA {
l = lB
}
if lA == lB && lA == 0 {
if a.IsNil() != b.IsNil() {
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
return nil
}
var diffs []diff
for i := 0; i < l; i++ {
if !reflect.DeepEqual(a.Index(i), b.Index(i)) {
diffs = append(diffs, objectReflectDiff(path.Index(i), a.Index(i), b.Index(i))...)
}
}
for i := l; i < lA; i++ {
diffs = append(diffs, diff{path: path.Index(i), a: a.Index(i), b: nil})
}
for i := l; i < lB; i++ {
diffs = append(diffs, diff{path: path.Index(i), a: nil, b: b.Index(i)})
}
return diffs
case reflect.Map:
if reflect.DeepEqual(a.Interface(), b.Interface()) {
return nil
}
aKeys := make(map[interface{}]interface{})
for _, key := range a.MapKeys() {
aKeys[key.Interface()] = a.MapIndex(key).Interface()
}
var missing []diff
for _, key := range b.MapKeys() {
if _, ok := aKeys[key.Interface()]; ok {
delete(aKeys, key.Interface())
if reflect.DeepEqual(a.MapIndex(key).Interface(), b.MapIndex(key).Interface()) {
continue
}
missing = append(missing, objectReflectDiff(path.Key(fmt.Sprintf("%s", key.Interface())), a.MapIndex(key), b.MapIndex(key))...)
continue
}
missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key.Interface())), a: nil, b: b.MapIndex(key).Interface()})
}
for key, value := range aKeys {
missing = append(missing, diff{path: path.Key(fmt.Sprintf("%s", key)), a: value, b: nil})
}
if len(missing) == 0 {
missing = append(missing, diff{path: path, a: a.Interface(), b: b.Interface()})
}
sort.Sort(orderedDiffs(missing))
return missing
default:
if reflect.DeepEqual(a.Interface(), b.Interface()) {
return nil
}
if !a.CanInterface() {
return []diff{{path: path, a: fmt.Sprintf("%#v", a), b: fmt.Sprintf("%#v", b)}}
}
return []diff{{path: path, a: a.Interface(), b: b.Interface()}}
}
}
// ObjectGoPrintSideBySide prints a and b as textual dumps side by side,
// enabling easy visual scanning for mismatches.
func ObjectGoPrintSideBySide(a, b interface{}) string {
s := spew.ConfigState{
Indent: " ",
// Extra deep spew.
DisableMethods: true,
}
sA := s.Sdump(a)
sB := s.Sdump(b)
linesA := strings.Split(sA, "\n")
linesB := strings.Split(sB, "\n")
width := 0
for _, s := range linesA {
l := len(s)
if l > width {
width = l
}
}
for _, s := range linesB {
l := len(s)
if l > width {
width = l
}
}
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0)
max := len(linesA)
if len(linesB) > max {
max = len(linesB)
}
for i := 0; i < max; i++ {
var a, b string
if i < len(linesA) {
a = linesA[i]
}
if i < len(linesB) {
b = linesB[i]
}
fmt.Fprintf(w, "%s\t%s\n", a, b)
}
w.Flush()
return buf.String()
}
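
A brief illustration of how the diff helpers above are typically used when debugging tests; the spec struct is a made-up example type, not something from this package.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/diff"
)

type spec struct {
	Image    string
	Replicas int
}

func main() {
	a := spec{Image: "nginx:1.14", Replicas: 2}
	b := spec{Image: "nginx:1.15", Replicas: 2}

	// Field-by-field comparison; prints only the paths that differ.
	fmt.Println(diff.ObjectReflectDiff(a, b))

	// JSON-based prefix diff, handy when the structures are large.
	fmt.Println(diff.ObjectDiff(a, b))
}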

vendor/k8s.io/apimachinery/pkg/util/wait/doc.go (generated, vendored, new file: 19 lines)

@@ -0,0 +1,19 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package wait provides tools for polling or listening for changes
// to a condition.
package wait // import "k8s.io/apimachinery/pkg/util/wait"

vendor/k8s.io/apimachinery/pkg/util/wait/wait.go (generated, vendored, new file: 405 lines)

@@ -0,0 +1,405 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package wait
import (
"context"
"errors"
"math/rand"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/runtime"
)
// For any test of the style:
// ...
// <- time.After(timeout):
// t.Errorf("Timed out")
// The value for timeout should effectively be "forever." Obviously we don't want our tests to truly lock up forever, but 30s
// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine
// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test.
var ForeverTestTimeout = time.Second * 30
// NeverStop may be passed to Until to make it never stop.
var NeverStop <-chan struct{} = make(chan struct{})
// Group allows starting a group of goroutines and waiting for their completion.
type Group struct {
wg sync.WaitGroup
}
func (g *Group) Wait() {
g.wg.Wait()
}
// StartWithChannel starts f in a new goroutine in the group.
// stopCh is passed to f as an argument. f should stop when stopCh is available.
func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) {
g.Start(func() {
f(stopCh)
})
}
// StartWithContext starts f in a new goroutine in the group.
// ctx is passed to f as an argument. f should stop when ctx.Done() is available.
func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) {
g.Start(func() {
f(ctx)
})
}
// Start starts f in a new goroutine in the group.
func (g *Group) Start(f func()) {
g.wg.Add(1)
go func() {
defer g.wg.Done()
f()
}()
}
// Forever calls f every period for ever.
//
// Forever is syntactic sugar on top of Until.
func Forever(f func(), period time.Duration) {
Until(f, period, NeverStop)
}
// Until loops until stop channel is closed, running f every period.
//
// Until is syntactic sugar on top of JitterUntil with zero jitter factor and
// with sliding = true (which means the timer for period starts after the f
// completes).
func Until(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, true, stopCh)
}
// NonSlidingUntil loops until stop channel is closed, running f every
// period.
//
// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter
// factor, with sliding = false (meaning the timer for period starts at the same
// time as the function starts).
func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) {
JitterUntil(f, period, 0.0, false, stopCh)
}
// JitterUntil loops until stop channel is closed, running f every period.
//
// If jitterFactor is positive, the period is jittered before every run of f.
// If jitterFactor is not positive, the period is unchanged and not jittered.
//
// If sliding is true, the period is computed after f runs. If it is false then
// period includes the runtime for f.
//
// Close stopCh to stop. f may not be invoked if stop channel is already
// closed. Pass NeverStop if you don't want it to stop.
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) {
var t *time.Timer
var sawTimeout bool
for {
select {
case <-stopCh:
return
default:
}
jitteredPeriod := period
if jitterFactor > 0.0 {
jitteredPeriod = Jitter(period, jitterFactor)
}
if !sliding {
t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
}
func() {
defer runtime.HandleCrash()
f()
}()
if sliding {
t = resetOrReuseTimer(t, jitteredPeriod, sawTimeout)
}
// NOTE: b/c there is no priority selection in golang
// it is possible for this to race, meaning we could
// trigger t.C and stopCh, and t.C select falls through.
// In order to mitigate we re-check stopCh at the beginning
// of every loop to prevent extra executions of f().
select {
case <-stopCh:
return
case <-t.C:
sawTimeout = true
}
}
}
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
if maxFactor <= 0.0 {
maxFactor = 1.0
}
wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
return wait
}
// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")
// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
Duration time.Duration // the base duration
Factor float64 // Duration is multiplied by factor each iteration
Jitter float64 // The amount of jitter applied each iteration
Steps int // Exit with error after this many steps
}
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It checks the condition up to Steps times, increasing the wait by multiplying
// the previous duration by Factor.
//
// If Jitter is greater than zero, a random amount of each duration is added
// (between duration and duration*(1+jitter)).
//
// If the condition never returns true, ErrWaitTimeout is returned. All other
// errors terminate immediately.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
duration := backoff.Duration
for i := 0; i < backoff.Steps; i++ {
if i != 0 {
adjusted := duration
if backoff.Jitter > 0.0 {
adjusted = Jitter(duration, backoff.Jitter)
}
time.Sleep(adjusted)
duration = time.Duration(float64(duration) * backoff.Factor)
}
if ok, err := condition(); err != nil || ok {
return err
}
}
return ErrWaitTimeout
}
// Poll tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// Poll always waits the interval before the run of 'condition'.
// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to Poll something forever, see PollInfinite.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
return pollInternal(poller(interval, timeout), condition)
}
func pollInternal(wait WaitFunc, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return WaitFor(wait, condition, done)
}
// PollImmediate tries a condition func until it returns true, an error, or the timeout
// is reached.
//
// PollImmediate always checks 'condition' before waiting for the interval. 'condition'
// will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
return pollImmediateInternal(poller(interval, timeout), condition)
}
func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
done, err := condition()
if err != nil {
return err
}
if done {
return nil
}
return pollInternal(wait, condition)
}
// PollInfinite tries a condition func until it returns true or an error
//
// PollInfinite always waits the interval before the run of 'condition'.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollInfinite(interval time.Duration, condition ConditionFunc) error {
done := make(chan struct{})
defer close(done)
return PollUntil(interval, condition, done)
}
// PollImmediateInfinite tries a condition func until it returns true or an error
//
// PollImmediateInfinite runs the 'condition' before waiting for the interval.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
done, err := condition()
if err != nil {
return err
}
if done {
return nil
}
return PollInfinite(interval, condition)
}
// PollUntil tries a condition func until it returns true, an error or stopCh is
// closed.
//
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
return WaitFor(poller(interval, 0), condition, stopCh)
}
// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
//
// PollImmediateUntil runs the 'condition' before waiting for the interval.
// 'condition' will always be invoked at least once.
func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
done, err := condition()
if err != nil {
return err
}
if done {
return nil
}
select {
case <-stopCh:
return ErrWaitTimeout
default:
return PollUntil(interval, condition, stopCh)
}
}
// WaitFunc creates a channel that receives an item every time a test
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()', and then invokes 'fn' once for every value
// placed on the channel and once more when the channel is closed.
//
// If 'fn' returns an error the loop ends and that error is returned, and if
// 'fn' returns true the loop ends and nil is returned.
//
// ErrWaitTimeout will be returned if the channel is closed without fn ever
// returning true.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
c := wait(done)
for {
_, open := <-c
ok, err := fn()
if err != nil {
return err
}
if ok {
return nil
}
if !open {
break
}
}
return ErrWaitTimeout
}
// poller returns a WaitFunc that will send to the channel every interval until
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as an infinity.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
func poller(interval, timeout time.Duration) WaitFunc {
return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
ch := make(chan struct{})
go func() {
defer close(ch)
tick := time.NewTicker(interval)
defer tick.Stop()
var after <-chan time.Time
if timeout != 0 {
// time.After is more convenient, but it
// potentially leaves timers around much longer
// than necessary if we exit early.
timer := time.NewTimer(timeout)
after = timer.C
defer timer.Stop()
}
for {
select {
case <-tick.C:
// If the consumer isn't ready for this signal drop it and
// check the other channels.
select {
case ch <- struct{}{}:
default:
}
case <-after:
return
case <-done:
return
}
}
}()
return ch
})
}
// resetOrReuseTimer avoids allocating a new timer if one is already in use.
// Not safe for multiple threads.
func resetOrReuseTimer(t *time.Timer, d time.Duration, sawTimeout bool) *time.Timer {
if t == nil {
return time.NewTimer(d)
}
if !t.Stop() && !sawTimeout {
<-t.C
}
t.Reset(d)
return t
}
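
The wait helpers above are the building blocks behind the controller's resync and retry loops. A small usage sketch, with intervals and conditions chosen purely for illustration:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// Run a heartbeat every second until stopCh is closed.
	go wait.Until(func() { fmt.Println("tick") }, time.Second, stopCh)

	// Poll a condition every 100ms, giving up after 3s.
	start := time.Now()
	err := wait.PollImmediate(100*time.Millisecond, 3*time.Second, func() (bool, error) {
		return time.Since(start) > 250*time.Millisecond, nil
	})
	fmt.Println("poll result:", err) // nil once the condition returns true

	// Retry with exponential backoff, doubling the wait between attempts.
	attempts := 0
	err = wait.ExponentialBackoff(wait.Backoff{
		Duration: 10 * time.Millisecond,
		Factor:   2.0,
		Steps:    5,
	}, func() (bool, error) {
		attempts++
		return attempts == 3, nil
	})
	fmt.Println("backoff result:", err, "after", attempts, "attempts")

	close(stopCh)
}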