Fill in Default Values for CPU/Memory (#130)
- Update the k8s client and its dependencies
- Change the ACI client to support mocking
- Add ACI provider mock tests
- Add the Windows development environment
- Add unit tests for the default resource requests
- Enable "make test" in the Dockerfile
- Update the vendored packages
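For reference, the defaulting named in the title can be sketched as below. This is a minimal sketch only: the helper name and the concrete default quantities are illustrative assumptions, not the provider's actual values.

// Sketch: before dispatching a pod to ACI, give every container that
// omits CPU/memory requests an explicit default, since the ACI API
// expects concrete values. Helper name and quantities are illustrative.
package aci

import (
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func fillDefaultResources(pod *v1.Pod) {
	for i := range pod.Spec.Containers {
		c := &pod.Spec.Containers[i]
		if c.Resources.Requests == nil {
			c.Resources.Requests = v1.ResourceList{}
		}
		if _, ok := c.Resources.Requests[v1.ResourceCPU]; !ok {
			c.Resources.Requests[v1.ResourceCPU] = resource.MustParse("1")
		}
		if _, ok := c.Resources.Requests[v1.ResourceMemory]; !ok {
			c.Resources.Requests[v1.ResourceMemory] = resource.MustParse("1.5Gi")
		}
	}
}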
vendor/k8s.io/client-go/tools/cache/BUILD (generated, vendored, 6 lines changed)
@@ -22,6 +22,8 @@ go_test(
         "store_test.go",
         "undelta_store_test.go",
     ],
+    features = ["-race"],
+    importpath = "k8s.io/client-go/tools/cache",
     library = ":go_default_library",
     deps = [
         "//vendor/github.com/google/gofuzz:go_default_library",
@@ -33,7 +35,6 @@ go_test(
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache/testing:go_default_library",
     ],
 )
@@ -61,6 +62,7 @@ go_library(
         "thread_safe_store.go",
         "undelta_store.go",
     ],
+    importpath = "k8s.io/client-go/tools/cache",
     deps = [
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
@@ -78,9 +80,9 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
         "//vendor/k8s.io/client-go/tools/pager:go_default_library",
+        "//vendor/k8s.io/client-go/util/buffer:go_default_library",
     ],
 )

vendor/k8s.io/client-go/tools/cache/delta_fifo.go (generated, vendored, 4 lines changed)
@@ -539,6 +539,10 @@ func (f *DeltaFIFO) Resync() error {
     f.lock.Lock()
     defer f.lock.Unlock()
 
+    if f.knownObjects == nil {
+        return nil
+    }
+
     keys := f.knownObjects.ListKeys()
     for _, k := range keys {
         if err := f.syncKeyLocked(k); err != nil {

vendor/k8s.io/client-go/tools/cache/fifo.go (generated, vendored, 2 lines changed)
@@ -169,7 +169,7 @@ func (f *FIFO) AddIfNotPresent(obj interface{}) error {
     return nil
 }
 
-// addIfNotPresent assumes the fifo lock is already held and adds the the provided
+// addIfNotPresent assumes the fifo lock is already held and adds the provided
 // item to the queue under id if it does not already exist.
 func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
     f.populated = true

vendor/k8s.io/client-go/tools/cache/index_test.go (generated, vendored, 38 lines changed)
@@ -22,7 +22,6 @@ import (
 
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/client-go/kubernetes/scheme"
 )
 
 func testIndexFunc(obj interface{}) ([]string, error) {
@@ -78,6 +77,11 @@ func TestMultiIndexKeys(t *testing.T) {
     if len(erniePods) != 2 {
         t.Errorf("Expected 2 pods but got %v", len(erniePods))
     }
+    for _, erniePod := range erniePods {
+        if erniePod.(*v1.Pod).Name != "one" && erniePod.(*v1.Pod).Name != "tre" {
+            t.Errorf("Expected only 'one' or 'tre' but got %s", erniePod.(*v1.Pod).Name)
+        }
+    }
 
     bertPods, err := index.ByIndex("byUser", "bert")
     if err != nil {
@@ -86,6 +90,11 @@ func TestMultiIndexKeys(t *testing.T) {
     if len(bertPods) != 2 {
         t.Errorf("Expected 2 pods but got %v", len(bertPods))
     }
+    for _, bertPod := range bertPods {
+        if bertPod.(*v1.Pod).Name != "one" && bertPod.(*v1.Pod).Name != "two" {
+            t.Errorf("Expected only 'one' or 'two' but got %s", bertPod.(*v1.Pod).Name)
+        }
+    }
 
     oscarPods, err := index.ByIndex("byUser", "oscar")
     if err != nil {
@@ -94,6 +103,11 @@ func TestMultiIndexKeys(t *testing.T) {
     if len(oscarPods) != 1 {
         t.Errorf("Expected 1 pods but got %v", len(erniePods))
     }
+    for _, oscarPod := range oscarPods {
+        if oscarPod.(*v1.Pod).Name != "two" {
+            t.Errorf("Expected only 'two' but got %s", oscarPod.(*v1.Pod).Name)
+        }
+    }
 
     ernieAndBertKeys, err := index.Index("byUser", pod1)
     if err != nil {
@@ -102,6 +116,11 @@ func TestMultiIndexKeys(t *testing.T) {
     if len(ernieAndBertKeys) != 3 {
         t.Errorf("Expected 3 pods but got %v", len(ernieAndBertKeys))
     }
+    for _, ernieAndBertKey := range ernieAndBertKeys {
+        if ernieAndBertKey.(*v1.Pod).Name != "one" && ernieAndBertKey.(*v1.Pod).Name != "two" && ernieAndBertKey.(*v1.Pod).Name != "tre" {
+            t.Errorf("Expected only 'one', 'two' or 'tre' but got %s", ernieAndBertKey.(*v1.Pod).Name)
+        }
+    }
 
     index.Delete(pod3)
     erniePods, err = index.ByIndex("byUser", "ernie")
@@ -111,6 +130,12 @@ func TestMultiIndexKeys(t *testing.T) {
     if len(erniePods) != 1 {
         t.Errorf("Expected 1 pods but got %v", len(erniePods))
     }
+    for _, erniePod := range erniePods {
+        if erniePod.(*v1.Pod).Name != "one" {
+            t.Errorf("Expected only 'one' but got %s", erniePod.(*v1.Pod).Name)
+        }
+    }
+
     elmoPods, err := index.ByIndex("byUser", "elmo")
     if err != nil {
         t.Errorf("unexpected error: %v", err)
@@ -119,11 +144,7 @@ func TestMultiIndexKeys(t *testing.T) {
         t.Errorf("Expected 0 pods but got %v", len(elmoPods))
     }
 
-    obj, err := scheme.Scheme.DeepCopy(pod2)
-    if err != nil {
-        t.Errorf("unexpected error: %v", err)
-    }
-    copyOfPod2 := obj.(*v1.Pod)
+    copyOfPod2 := pod2.DeepCopy()
     copyOfPod2.Annotations["users"] = "oscar"
     index.Update(copyOfPod2)
     bertPods, err = index.ByIndex("byUser", "bert")
@@ -133,5 +154,10 @@ func TestMultiIndexKeys(t *testing.T) {
     if len(bertPods) != 1 {
         t.Errorf("Expected 1 pods but got %v", len(bertPods))
     }
+    for _, bertPod := range bertPods {
+        if bertPod.(*v1.Pod).Name != "one" {
+            t.Errorf("Expected only 'one' but got %s", bertPod.(*v1.Pod).Name)
+        }
+    }
 }
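The copyOfPod2 change above is one instance of a pattern that recurs in mutation_detector.go and fake_controller_source.go below: the updated client replaces the reflection-based, error-returning scheme.Scheme.DeepCopy with generated deep-copy methods that cannot fail. A minimal sketch of the two generated forms, assuming the client-go version vendored by this commit:

// Sketch of the generated deep-copy API that replaces scheme.Scheme.DeepCopy.
package cachecopy

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// Typed form: every API struct now carries a DeepCopy method, so no error
// handling or type assertion is needed (used in index_test.go above).
func copyPod(pod *v1.Pod) *v1.Pod {
	return pod.DeepCopy()
}

// Generic form: when only runtime.Object is known, DeepCopyObject does the
// same job (used in mutation_detector.go and fake_controller_source.go).
func copyObject(obj runtime.Object) runtime.Object {
	return obj.DeepCopyObject()
}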

vendor/k8s.io/client-go/tools/cache/listwatch.go (generated, vendored, 17 lines changed)
@@ -25,6 +25,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/apimachinery/pkg/watch"
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/pager"
@@ -51,8 +52,7 @@ type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
 type ListWatch struct {
     ListFunc  ListFunc
     WatchFunc WatchFunc
-    // DisableChunking requests no chunking for this list watcher. It has no effect in Kubernetes 1.8, but in
-    // 1.9 will allow a controller to opt out of chunking.
+    // DisableChunking requests no chunking for this list watcher.
     DisableChunking bool
 }
 
@@ -93,9 +93,7 @@ func timeoutFromListOptions(options metav1.ListOptions) time.Duration {
 
 // List a set of apiserver resources
 func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
-    // chunking will become the default for list watchers starting in Kubernetes 1.9, unless
-    // otherwise disabled.
-    if false && !lw.DisableChunking {
+    if !lw.DisableChunking {
         return pager.New(pager.SimplePageFunc(lw.ListFunc)).List(context.TODO(), options)
     }
     return lw.ListFunc(options)
@@ -106,6 +104,8 @@ func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error)
     return lw.WatchFunc(options)
 }
 
+// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
+// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
 // TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
 func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch.ConditionFunc) (*watch.Event, error) {
     if len(conditions) == 0 {
@@ -169,5 +169,10 @@ func ListWatchUntil(timeout time.Duration, lw ListerWatcher, conditions ...watch
         return nil, err
     }
 
-    return watch.Until(timeout, watchInterface, remainingConditions...)
+    evt, err := watch.Until(timeout, watchInterface, remainingConditions...)
+    if err == watch.ErrWatchClosed {
+        // present a consistent error interface to callers
+        err = wait.ErrWaitTimeout
+    }
+    return evt, err
 }
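Two behavioral changes land here: with the "if false &&" guard gone, List now goes through the pager (chunked lists) by default, with DisableChunking as the opt-out, and ListWatchUntil folds watch.ErrWatchClosed into wait.ErrWaitTimeout so callers see a single timeout error. A sketch of the opt-out from a consumer's side; NewListWatchFromClient is the real client-go helper, while the pods/namespace choice is illustrative:

package watchutil

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newPodListWatch builds a pod ListWatch. Setting DisableChunking keeps
// List on the single-request path instead of the paged path that this
// change enables by default.
func newPodListWatch(client kubernetes.Interface) *cache.ListWatch {
	lw := cache.NewListWatchFromClient(
		client.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())
	lw.DisableChunking = true
	return lw
}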

vendor/k8s.io/client-go/tools/cache/mutation_detector.go (generated, vendored, 16 lines changed)
@@ -26,7 +26,6 @@ import (
 
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/diff"
-    "k8s.io/client-go/kubernetes/scheme"
 )
 
 var mutationDetectionEnabled = false
@@ -96,18 +95,13 @@ func (d *defaultCacheMutationDetector) AddObject(obj interface{}) {
     if _, ok := obj.(DeletedFinalStateUnknown); ok {
         return
     }
-    if _, ok := obj.(runtime.Object); !ok {
-        return
-    }
+    if obj, ok := obj.(runtime.Object); ok {
+        copiedObj := obj.DeepCopyObject()
 
-    copiedObj, err := scheme.Scheme.Copy(obj.(runtime.Object))
-    if err != nil {
-        return
+        d.lock.Lock()
+        defer d.lock.Unlock()
+        d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
     }
-
-    d.lock.Lock()
-    defer d.lock.Unlock()
-    d.cachedObjs = append(d.cachedObjs, cacheObj{cached: obj, copied: copiedObj})
 }
 
 func (d *defaultCacheMutationDetector) CompareObjects() {

vendor/k8s.io/client-go/tools/cache/processor_listener_test.go (generated, vendored, 3 lines changed)
@@ -34,11 +34,12 @@ func BenchmarkListener(b *testing.B) {
     var swg sync.WaitGroup
     swg.Add(b.N)
     b.SetParallelism(concurrencyLevel)
+    // Preallocate enough space so that benchmark does not run out of it
     pl := newProcessListener(&ResourceEventHandlerFuncs{
         AddFunc: func(obj interface{}) {
             swg.Done()
         },
-    }, 0, 0, time.Now())
+    }, 0, 0, time.Now(), 1024*1024)
     var wg wait.Group
     defer wg.Wait()       // Wait for .run and .pop to stop
     defer close(pl.addCh) // Tell .run and .pop to stop

vendor/k8s.io/client-go/tools/cache/reflector.go (generated, vendored, 13 lines changed)
@@ -109,7 +109,7 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
     r := &Reflector{
         name: name,
         // we need this to be unique per process (some names are still the same)but obvious who it belongs to
-        metrics:       newReflectorMetrics(makeValidPromethusMetricName(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
+        metrics:       newReflectorMetrics(makeValidPromethusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))),
         listerWatcher: lw,
         store:         store,
         expectedType:  reflect.TypeOf(expectedType),
@@ -120,9 +120,9 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{},
     return r
 }
 
-func makeValidPromethusMetricName(in string) string {
+func makeValidPromethusMetricLabel(in string) string {
     // this isn't perfect, but it removes our common characters
-    return strings.NewReplacer("/", "_", ".", "_", "-", "_").Replace(in)
+    return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
 }
 
 // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common
@@ -295,6 +295,13 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
     }()
 
     for {
+        // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
+        select {
+        case <-stopCh:
+            return nil
+        default:
+        }
+
         timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
         options = metav1.ListOptions{
             ResourceVersion: resourceVersion,
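Besides the rename from ...MetricName to ...MetricLabel, the replacer gains a ":" mapping. A standalone sketch of the sanitizer's effect, using the same strings.NewReplacer call as above:

package main

import (
	"fmt"
	"strings"
)

// sanitize mirrors makeValidPromethusMetricLabel: it rewrites the
// characters that commonly appear in reflector names ("/", ".", "-",
// and now ":") to underscores.
func sanitize(in string) string {
	return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in)
}

func main() {
	fmt.Println(sanitize("reflector_k8s.io/client-go:cache_42"))
	// Prints: reflector_k8s_io_client_go_cache_42
}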

vendor/k8s.io/client-go/tools/cache/shared_informer.go (generated, vendored, 39 lines changed)
@@ -25,6 +25,7 @@ import (
     "k8s.io/apimachinery/pkg/util/clock"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/client-go/util/buffer"
 
     "github.com/golang/glog"
 )
@@ -92,8 +93,13 @@ func NewSharedIndexInformer(lw ListerWatcher, objType runtime.Object, defaultEve
 // InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
 type InformerSynced func() bool
 
-// syncedPollPeriod controls how often you look at the status of your sync funcs
-const syncedPollPeriod = 100 * time.Millisecond
+const (
+    // syncedPollPeriod controls how often you look at the status of your sync funcs
+    syncedPollPeriod = 100 * time.Millisecond
+
+    // initialBufferSize is the initial number of event notifications that can be buffered.
+    initialBufferSize = 1024
+)
 
 // WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
 // if the controller should shutdown
@@ -313,7 +319,7 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
         }
     }
 
-    listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now())
+    listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)
 
     if !s.started {
         s.processor.addListener(listener)
@@ -465,6 +471,13 @@ type processorListener struct {
 
     handler ResourceEventHandler
 
+    // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
+    // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
+    // added until we OOM.
+    // TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
+    // we should try to do something better.
+    pendingNotifications buffer.RingGrowing
+
    // requestedResyncPeriod is how frequently the listener wants a full resync from the shared informer
    requestedResyncPeriod time.Duration
    // resyncPeriod is how frequently the listener wants a full resync from the shared informer. This
@@ -477,11 +490,12 @@ type processorListener struct {
     resyncLock sync.Mutex
 }
 
-func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time) *processorListener {
+func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
     ret := &processorListener{
         nextCh:                make(chan interface{}),
         addCh:                 make(chan interface{}),
         handler:               handler,
+        pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
         requestedResyncPeriod: requestedResyncPeriod,
         resyncPeriod:          resyncPeriod,
     }
@@ -499,25 +513,16 @@ func (p *processorListener) pop() {
     defer utilruntime.HandleCrash()
     defer close(p.nextCh) // Tell .run() to stop
 
-    // pendingNotifications is an unbounded slice that holds all notifications not yet distributed
-    // there is one per listener, but a failing/stalled listener will have infinite pendingNotifications
-    // added until we OOM.
-    // TODO This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
-    // we should try to do something better
-    var pendingNotifications []interface{}
     var nextCh chan<- interface{}
     var notification interface{}
     for {
         select {
         case nextCh <- notification:
             // Notification dispatched
-            if len(pendingNotifications) == 0 { // Nothing to pop
+            var ok bool
+            notification, ok = p.pendingNotifications.ReadOne()
+            if !ok { // Nothing to pop
                 nextCh = nil // Disable this select case
-                notification = nil
-            } else {
-                notification = pendingNotifications[0]
-                pendingNotifications[0] = nil
-                pendingNotifications = pendingNotifications[1:]
             }
         case notificationToAdd, ok := <-p.addCh:
             if !ok {
@@ -528,7 +533,7 @@ func (p *processorListener) pop() {
                 notification = notificationToAdd
                 nextCh = p.nextCh
             } else { // There is already a notification waiting to be dispatched
-                pendingNotifications = append(pendingNotifications, notificationToAdd)
+                p.pendingNotifications.WriteOne(notificationToAdd)
             }
         }
     }
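The listener's pending-notification queue moves from an ad-hoc slice to client-go's util/buffer.RingGrowing, preallocated at initialBufferSize (1024) so the common case never reallocates; WriteOne grows the ring instead of blocking, and ReadOne reports emptiness through its second result. A self-contained sketch of that API:

package main

import (
	"fmt"

	"k8s.io/client-go/util/buffer"
)

func main() {
	// Start small to show growth: the ring expands as needed, so writes
	// never block and never drop notifications.
	ring := buffer.NewRingGrowing(4)

	for i := 0; i < 6; i++ {
		ring.WriteOne(i)
	}
	for {
		v, ok := ring.ReadOne()
		if !ok { // empty
			break
		}
		fmt.Println(v) // 0 1 2 3 4 5, in FIFO order
	}
}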

vendor/k8s.io/client-go/tools/cache/testing/BUILD (generated, vendored, 3 lines changed)
@@ -9,6 +9,7 @@ load(
 go_test(
     name = "go_default_test",
     srcs = ["fake_controller_source_test.go"],
+    importpath = "k8s.io/client-go/tools/cache/testing",
     library = ":go_default_library",
     deps = [
         "//vendor/k8s.io/api/core/v1:go_default_library",
@@ -20,6 +21,7 @@ go_test(
 go_library(
     name = "go_default_library",
     srcs = ["fake_controller_source.go"],
+    importpath = "k8s.io/client-go/tools/cache/testing",
     deps = [
         "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
@@ -27,7 +29,6 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
     ],
 )

vendor/k8s.io/client-go/tools/cache/testing/fake_controller_source.go (generated, vendored, 13 lines changed)
@@ -28,7 +28,6 @@ import (
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/watch"
-    "k8s.io/client-go/kubernetes/scheme"
 )
 
 func NewFakeControllerSource() *FakeControllerSource {
@@ -153,11 +152,7 @@ func (f *FakeControllerSource) getListItemsLocked() ([]runtime.Object, error) {
         // Otherwise, if they make a change and write it back, they
         // will inadvertently change our canonical copy (in
         // addition to racing with other clients).
-        objCopy, err := scheme.Scheme.DeepCopy(obj)
-        if err != nil {
-            return nil, err
-        }
-        list = append(list, objCopy.(runtime.Object))
+        list = append(list, obj.DeepCopyObject())
     }
     return list, nil
 }
@@ -242,11 +237,7 @@ func (f *FakeControllerSource) Watch(options metav1.ListOptions) (watch.Interfac
             // it back, they will inadvertently change the our
             // canonical copy (in addition to racing with other
             // clients).
-            objCopy, err := scheme.Scheme.DeepCopy(c.Object)
-            if err != nil {
-                return nil, err
-            }
-            changes = append(changes, watch.Event{Type: c.Type, Object: objCopy.(runtime.Object)})
+            changes = append(changes, watch.Event{Type: c.Type, Object: c.Object.DeepCopyObject()})
         }
         return f.Broadcaster.WatchWithPrefix(changes), nil
     } else if rc > len(f.changes) {

vendor/k8s.io/client-go/tools/cache/thread_safe_store.go (generated, vendored, 10 lines changed)
@@ -241,7 +241,7 @@ func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error {
 
 // updateIndices modifies the objects location in the managed indexes, if this is an update, you must provide an oldObj
 // updateIndices must be called from a function that already has a lock on the cache
-func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) error {
+func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, key string) {
     // if we got an old object, we need to remove it before we add it again
     if oldObj != nil {
         c.deleteFromIndices(oldObj, key)
@@ -249,7 +249,7 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke
     for name, indexFunc := range c.indexers {
         indexValues, err := indexFunc(newObj)
         if err != nil {
-            return err
+            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
         }
         index := c.indices[name]
         if index == nil {
@@ -266,16 +266,15 @@ func (c *threadSafeMap) updateIndices(oldObj interface{}, newObj interface{}, ke
             set.Insert(key)
         }
     }
-    return nil
 }
 
 // deleteFromIndices removes the object from each of the managed indexes
 // it is intended to be called from a function that already has a lock on the cache
-func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error {
+func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) {
     for name, indexFunc := range c.indexers {
         indexValues, err := indexFunc(obj)
         if err != nil {
-            return err
+            panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
         }
 
         index := c.indices[name]
@@ -289,7 +288,6 @@ func (c *threadSafeMap) deleteFromIndices(obj interface{}, key string) error {
             }
         }
     }
-    return nil
 }
 
 func (c *threadSafeMap) Resync() error {
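updateIndices and deleteFromIndices now panic instead of returning errors that their callers ignored; a failing IndexFunc mid-update would otherwise leave the indices silently inconsistent. The upshot is that index functions must be total over the objects admitted to the store. A sketch of a conventional one, modeled on client-go's MetaNamespaceIndexFunc (the index name is illustrative):

package indexing

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/tools/cache"
)

// byNamespace indexes objects by namespace. It only fails for values that
// are not API objects at all, which a store populated by an informer never
// contains, so the new panic path is unreachable in practice.
func byNamespace(obj interface{}) ([]string, error) {
	m, err := meta.Accessor(obj)
	if err != nil {
		return nil, err
	}
	return []string{m.GetNamespace()}, nil
}

func newIndexer() cache.Indexer {
	return cache.NewIndexer(cache.MetaNamespaceKeyFunc,
		cache.Indexers{"namespace": byNamespace})
}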