tests: introduce e2e suite (#422)
* mock: implement GetStatsSummary
* make: use skaffold to deploy vk
* test: add an e2e test suite
* test: add vendored code
* docs: update README.md
* ci: run e2e on circleci
* make: improve the skaffold target
* e2e: fix defer pod deletion
* e2e: improve instructions
* makefile: default shell is bash

Signed-off-by: Paulo Pires <pjpires@gmail.com>
vendor/k8s.io/client-go/tools/watch/informerwatcher.go (new file, generated, vendored, 114 lines added)
@@ -0,0 +1,114 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package watch

import (
	"sync"
	"sync/atomic"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

func newTicketer() *ticketer {
	return &ticketer{
		cond: sync.NewCond(&sync.Mutex{}),
	}
}

type ticketer struct {
	counter uint64

	cond    *sync.Cond
	current uint64
}

func (t *ticketer) GetTicket() uint64 {
	// -1 to start from 0
	return atomic.AddUint64(&t.counter, 1) - 1
}

func (t *ticketer) WaitForTicket(ticket uint64, f func()) {
	t.cond.L.Lock()
	defer t.cond.L.Unlock()
	for ticket != t.current {
		t.cond.Wait()
	}

	f()

	t.current++
	t.cond.Broadcast()
}
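The ticketer above is a small ticket-lock: GetTicket hands out sequential tickets, and WaitForTicket runs callbacks strictly in ticket order, which is what lets the informer handlers below fan work out to goroutines without reordering events. A minimal in-package sketch of that guarantee (illustrative only, not part of this diff; assumes an extra "fmt" import):

func exampleTicketerOrdering() {
	t := newTicketer()
	var wg sync.WaitGroup
	out := make([]uint64, 0, 3)
	for i := 0; i < 3; i++ {
		wg.Add(1)
		ticket := t.GetTicket() // tickets are issued in order here: 0, 1, 2
		go func(ticket uint64) {
			defer wg.Done()
			// Callbacks run one at a time, in ticket order, no matter how
			// the goroutines happen to be scheduled.
			t.WaitForTicket(ticket, func() {
				out = append(out, ticket)
			})
		}(ticket)
	}
	wg.Wait()
	fmt.Println(out) // always prints [0 1 2]
}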

// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface
// so you can use it anywhere where you'd have used a regular Watcher returned from Watch method.
func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface) {
	ch := make(chan watch.Event)
	w := watch.NewProxyWatcher(ch)
	t := newTicketer()

	indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			go t.WaitForTicket(t.GetTicket(), func() {
				select {
				case ch <- watch.Event{
					Type:   watch.Added,
					Object: obj.(runtime.Object),
				}:
				case <-w.StopChan():
				}
			})
		},
		UpdateFunc: func(old, new interface{}) {
			go t.WaitForTicket(t.GetTicket(), func() {
				select {
				case ch <- watch.Event{
					Type:   watch.Modified,
					Object: new.(runtime.Object),
				}:
				case <-w.StopChan():
				}
			})
		},
		DeleteFunc: func(obj interface{}) {
			go t.WaitForTicket(t.GetTicket(), func() {
				staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
				if stale {
					// We have no means of passing the additional information down using the watch API based on watch.Event,
					// but the caller can filter such objects by checking if metadata.deletionTimestamp is set.
					obj = staleObj.Obj
				}

				select {
				case ch <- watch.Event{
					Type:   watch.Deleted,
					Object: obj.(runtime.Object),
				}:
				case <-w.StopChan():
				}
			})
		},
	}, cache.Indexers{})

	go func() {
		informer.Run(w.StopChan())
	}()

	return indexer, informer, w
}
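For context, the watcher returned above can be consumed anywhere a plain watch.Interface is expected. A minimal sketch of wiring it to pods (illustrative only, not part of this diff; the clientset construction and the namespace are assumed):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

func watchPods(client kubernetes.Interface, namespace string) {
	// A ListerWatcher backed by the pods API of this client-go vintage.
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}

	_, _, w := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
	defer w.Stop()

	// Events arrive in informer order thanks to the ticketer.
	for event := range w.ResultChan() {
		pod := event.Object.(*v1.Pod)
		fmt.Printf("%s %s/%s\n", event.Type, pod.Namespace, pod.Name)
	}
}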
vendor/k8s.io/client-go/tools/watch/until.go (new file, generated, vendored, 225 lines added)
@@ -0,0 +1,225 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package watch

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
)

// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition failed or detected an error state.
type PreconditionFunc func(store cache.Store) (bool, error)

// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
// or an error if the condition cannot be checked and should terminate. In general, it is better to define
// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
// from false to true).
type ConditionFunc func(event watch.Event) (bool, error)

// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry.
var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout")

// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last
// watch event encountered. The first condition that returns an error terminates the watch (and the event is also returned).
// If no event has been received, the returned event will be nil.
// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
// Waits until context deadline or until context is canceled.
//
// Warning: Unless you have a very specific use case (probably a special Watcher), don't use this function!!!
// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' errors.
// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below,
// Warning: which solve such issues.
// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone.
func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) {
	ch := watcher.ResultChan()
	defer watcher.Stop()
	var lastEvent *watch.Event
	for _, condition := range conditions {
		// check the next condition against the previous event and short circuit waiting for the next watch
		if lastEvent != nil {
			done, err := condition(*lastEvent)
			if err != nil {
				return lastEvent, err
			}
			if done {
				continue
			}
		}
	ConditionSucceeded:
		for {
			select {
			case event, ok := <-ch:
				if !ok {
					return lastEvent, ErrWatchClosed
				}
				lastEvent = &event

				done, err := condition(event)
				if err != nil {
					return lastEvent, err
				}
				if done {
					break ConditionSucceeded
				}

			case <-ctx.Done():
				return lastEvent, wait.ErrWaitTimeout
			}
		}
	}
	return lastEvent, nil
}

// UntilWithSync creates an informer from lw, optionally checks the precondition when the store is synced,
// and watches the output until each provided condition succeeds, in a way that is identical
// to function UntilWithoutRetry. (See above.)
// UntilWithSync can deal with errors like API timeouts, lost connections and 'Resource version too old'.
// It is the only function that can recover from 'Resource version too old'; Until and UntilWithoutRetry will
// just fail in that case. On the other hand, it can't provide you with guarantees as strong as using a simple
// Watch method with Until. It can skip some intermediate events if the watch function fails, but it will
// re-list to recover, and you always get an event, if there has been a change, after recovery.
// Also, with the current implementation based on DeltaFIFO, the order of the events you receive is guaranteed
// only for a particular object, not across objects, even of the same resource.
// The most frequent usage is a command that needs to watch the "state of the world" and shouldn't fail, like:
// waiting for an object to reach a state, "small" controllers, ...
func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
	indexer, informer, watcher := NewIndexerInformerWatcher(lw, objType)
	// The proxy watcher can be stopped multiple times, so it's fine to use defer here to cover alternative
	// branches and to let UntilWithoutRetry stop it.
	defer watcher.Stop()

	if precondition != nil {
		if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
			return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err())
		}

		done, err := precondition(indexer)
		if err != nil {
			return nil, err
		}

		if done {
			return nil, nil
		}
	}

	return UntilWithoutRetry(ctx, watcher, conditions...)
}

// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
	if timeout < 0 {
		// This should be handled in validation
		glog.Errorf("Timeout for context shall not be negative!")
		timeout = 0
	}

	if timeout == 0 {
		return context.WithCancel(parent)
	}

	return context.WithTimeout(parent, timeout)
}

// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
// TODO: remove when no longer used
//
// Deprecated: Use UntilWithSync instead.
func ListWatchUntil(timeout time.Duration, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) {
	if len(conditions) == 0 {
		return nil, nil
	}

	list, err := lw.List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	initialItems, err := meta.ExtractList(list)
	if err != nil {
		return nil, err
	}

	// use the initial items as simulated "adds"
	var lastEvent *watch.Event
	currIndex := 0
	passedConditions := 0
	for _, condition := range conditions {
		// check the next condition against the previous event and short circuit waiting for the next watch
		if lastEvent != nil {
			done, err := condition(*lastEvent)
			if err != nil {
				return lastEvent, err
			}
			if done {
				passedConditions = passedConditions + 1
				continue
			}
		}

	ConditionSucceeded:
		for currIndex < len(initialItems) {
			lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
			currIndex++

			done, err := condition(*lastEvent)
			if err != nil {
				return lastEvent, err
			}
			if done {
				passedConditions = passedConditions + 1
				break ConditionSucceeded
			}
		}
	}
	if passedConditions == len(conditions) {
		return lastEvent, nil
	}
	remainingConditions := conditions[passedConditions:]

	metaObj, err := meta.ListAccessor(list)
	if err != nil {
		return nil, err
	}
	currResourceVersion := metaObj.GetResourceVersion()

	watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
	if err != nil {
		return nil, err
	}

	ctx, cancel := ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()
	evt, err := UntilWithoutRetry(ctx, watchInterface, remainingConditions...)
	if err == ErrWatchClosed {
		// present a consistent error interface to callers
		err = wait.ErrWaitTimeout
	}
	return evt, err
}
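UntilWithSync is the piece the e2e suite leans on, since it survives watch restarts and 'Resource version too old'. A sketch of the typical pattern, waiting for a single pod to report Ready (illustrative only, not part of this diff; the clientset, namespace and pod name are assumed, and IsPodReady comes from the pod util package vendored below):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func waitForPodReady(client kubernetes.Interface, namespace, name string) error {
	// Scope the list/watch to the single pod we care about.
	selector := "metadata.name=" + name
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = selector
			return client.CoreV1().Pods(namespace).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = selector
			return client.CoreV1().Pods(namespace).Watch(options)
		},
	}

	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), 5*time.Minute)
	defer cancel()

	// Level-driven condition, as the ConditionFunc docs recommend:
	// true as soon as the pod reports Ready, however we got there.
	_, err := watchtools.UntilWithSync(ctx, lw, &v1.Pod{}, nil, func(event watch.Event) (bool, error) {
		pod, ok := event.Object.(*v1.Pod)
		if !ok {
			return false, fmt.Errorf("unexpected object type %T", event.Object)
		}
		return podutil.IsPodReady(pod), nil
	})
	return err
}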
vendor/k8s.io/kubernetes/pkg/api/v1/pod/util.go (new file, generated, vendored, 305 lines added)
@@ -0,0 +1,305 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pod

import (
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// FindPort locates the container port for the given pod and portName. If the
// targetPort is a number, use that. If the targetPort is a string, look that
// string up in all named ports in all containers in the target pod. If no
// match is found, fail.
func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) {
	portName := svcPort.TargetPort
	switch portName.Type {
	case intstr.String:
		name := portName.StrVal
		for _, container := range pod.Spec.Containers {
			for _, port := range container.Ports {
				if port.Name == name && port.Protocol == svcPort.Protocol {
					return int(port.ContainerPort), nil
				}
			}
		}
	case intstr.Int:
		return portName.IntValue(), nil
	}

	return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
}
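A quick sketch of FindPort resolving a named targetPort (illustrative only, not part of this diff; the pod and service port below are hypothetical fixtures):

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{
		Containers: []v1.Container{{
			Ports: []v1.ContainerPort{{Name: "http", ContainerPort: 8080, Protocol: v1.ProtocolTCP}},
		}},
	}}
	svcPort := &v1.ServicePort{
		Protocol:   v1.ProtocolTCP,
		TargetPort: intstr.FromString("http"), // looked up by name across all containers
	}

	port, err := podutil.FindPort(pod, svcPort)
	fmt.Println(port, err) // 8080 <nil>
}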

// Visitor is called with each object name, and returns true if visiting should continue.
type Visitor func(name string) (shouldContinue bool)

// VisitPodSecretNames invokes the visitor function with the name of every secret
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodSecretNames(pod *v1.Pod, visitor Visitor) bool {
	for _, reference := range pod.Spec.ImagePullSecrets {
		if !visitor(reference.Name) {
			return false
		}
	}
	for i := range pod.Spec.InitContainers {
		if !visitContainerSecretNames(&pod.Spec.InitContainers[i], visitor) {
			return false
		}
	}
	for i := range pod.Spec.Containers {
		if !visitContainerSecretNames(&pod.Spec.Containers[i], visitor) {
			return false
		}
	}
	var source *v1.VolumeSource

	for i := range pod.Spec.Volumes {
		source = &pod.Spec.Volumes[i].VolumeSource
		switch {
		case source.AzureFile != nil:
			if len(source.AzureFile.SecretName) > 0 && !visitor(source.AzureFile.SecretName) {
				return false
			}
		case source.CephFS != nil:
			if source.CephFS.SecretRef != nil && !visitor(source.CephFS.SecretRef.Name) {
				return false
			}
		case source.Cinder != nil:
			if source.Cinder.SecretRef != nil && !visitor(source.Cinder.SecretRef.Name) {
				return false
			}
		case source.FlexVolume != nil:
			if source.FlexVolume.SecretRef != nil && !visitor(source.FlexVolume.SecretRef.Name) {
				return false
			}
		case source.Projected != nil:
			for j := range source.Projected.Sources {
				if source.Projected.Sources[j].Secret != nil {
					if !visitor(source.Projected.Sources[j].Secret.Name) {
						return false
					}
				}
			}
		case source.RBD != nil:
			if source.RBD.SecretRef != nil && !visitor(source.RBD.SecretRef.Name) {
				return false
			}
		case source.Secret != nil:
			if !visitor(source.Secret.SecretName) {
				return false
			}
		case source.ScaleIO != nil:
			if source.ScaleIO.SecretRef != nil && !visitor(source.ScaleIO.SecretRef.Name) {
				return false
			}
		case source.ISCSI != nil:
			if source.ISCSI.SecretRef != nil && !visitor(source.ISCSI.SecretRef.Name) {
				return false
			}
		case source.StorageOS != nil:
			if source.StorageOS.SecretRef != nil && !visitor(source.StorageOS.SecretRef.Name) {
				return false
			}
		}
	}
	return true
}

func visitContainerSecretNames(container *v1.Container, visitor Visitor) bool {
	for _, env := range container.EnvFrom {
		if env.SecretRef != nil {
			if !visitor(env.SecretRef.Name) {
				return false
			}
		}
	}
	for _, envVar := range container.Env {
		if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil {
			if !visitor(envVar.ValueFrom.SecretKeyRef.Name) {
				return false
			}
		}
	}
	return true
}
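The visitor short-circuits on the first false return; the common use is simply collecting every referenced name. A hypothetical in-package helper as a sketch (illustrative only, not part of this diff):

// podSecretNames gathers the names of all secrets a pod references,
// e.g. so a provider can pre-fetch them before starting the pod.
func podSecretNames(pod *v1.Pod) []string {
	names := []string{}
	VisitPodSecretNames(pod, func(name string) bool {
		names = append(names, name)
		return true // never short-circuit; we want all of them
	})
	return names
}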

// VisitPodConfigmapNames invokes the visitor function with the name of every configmap
// referenced by the pod spec. If visitor returns false, visiting is short-circuited.
// Transitive references (e.g. pod -> pvc -> pv -> secret) are not visited.
// Returns true if visiting completed, false if visiting was short-circuited.
func VisitPodConfigmapNames(pod *v1.Pod, visitor Visitor) bool {
	for i := range pod.Spec.InitContainers {
		if !visitContainerConfigmapNames(&pod.Spec.InitContainers[i], visitor) {
			return false
		}
	}
	for i := range pod.Spec.Containers {
		if !visitContainerConfigmapNames(&pod.Spec.Containers[i], visitor) {
			return false
		}
	}
	var source *v1.VolumeSource
	for i := range pod.Spec.Volumes {
		source = &pod.Spec.Volumes[i].VolumeSource
		switch {
		case source.Projected != nil:
			for j := range source.Projected.Sources {
				if source.Projected.Sources[j].ConfigMap != nil {
					if !visitor(source.Projected.Sources[j].ConfigMap.Name) {
						return false
					}
				}
			}
		case source.ConfigMap != nil:
			if !visitor(source.ConfigMap.Name) {
				return false
			}
		}
	}
	return true
}

func visitContainerConfigmapNames(container *v1.Container, visitor Visitor) bool {
	for _, env := range container.EnvFrom {
		if env.ConfigMapRef != nil {
			if !visitor(env.ConfigMapRef.Name) {
				return false
			}
		}
	}
	for _, envVar := range container.Env {
		if envVar.ValueFrom != nil && envVar.ValueFrom.ConfigMapKeyRef != nil {
			if !visitor(envVar.ValueFrom.ConfigMapKeyRef.Name) {
				return false
			}
		}
	}
	return true
}

// GetContainerStatus extracts the status of container "name" from "statuses".
// It also returns if "name" exists.
func GetContainerStatus(statuses []v1.ContainerStatus, name string) (v1.ContainerStatus, bool) {
	for i := range statuses {
		if statuses[i].Name == name {
			return statuses[i], true
		}
	}
	return v1.ContainerStatus{}, false
}

// GetExistingContainerStatus extracts the status of container "name" from "statuses",
// returning an empty status if "name" does not exist.
func GetExistingContainerStatus(statuses []v1.ContainerStatus, name string) v1.ContainerStatus {
	status, _ := GetContainerStatus(statuses, name)
	return status
}

// IsPodAvailable returns true if a pod is available; false otherwise.
// Precondition for an available pod is that it must be ready. On top
// of that, there are two cases when a pod can be considered available:
// 1. minReadySeconds == 0, or
// 2. LastTransitionTime (is set) + minReadySeconds < current time
func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now metav1.Time) bool {
	if !IsPodReady(pod) {
		return false
	}

	c := GetPodReadyCondition(pod.Status)
	minReadySecondsDuration := time.Duration(minReadySeconds) * time.Second
	if minReadySeconds == 0 || !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(minReadySecondsDuration).Before(now.Time) {
		return true
	}
	return false
}

// IsPodReady returns true if a pod is ready; false otherwise.
func IsPodReady(pod *v1.Pod) bool {
	return IsPodReadyConditionTrue(pod.Status)
}

// IsPodReadyConditionTrue returns true if a pod's Ready condition is true; false otherwise.
func IsPodReadyConditionTrue(status v1.PodStatus) bool {
	condition := GetPodReadyCondition(status)
	return condition != nil && condition.Status == v1.ConditionTrue
}

// GetPodReadyCondition extracts the pod ready condition from the given status and returns that.
// Returns nil if the condition is not present.
func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition {
	_, condition := GetPodCondition(&status, v1.PodReady)
	return condition
}

// GetPodCondition extracts the provided condition from the given status.
// Returns -1 and nil if the condition is not present; otherwise returns the index of the
// located condition and the condition itself.
func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
	if status == nil {
		return -1, nil
	}
	return GetPodConditionFromList(status.Conditions, conditionType)
}

// GetPodConditionFromList extracts the provided condition from the given list of conditions and
// returns the index of the condition and the condition itself. Returns -1 and nil if the
// condition is not present.
func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
	if conditions == nil {
		return -1, nil
	}
	for i := range conditions {
		if conditions[i].Type == conditionType {
			return i, &conditions[i]
		}
	}
	return -1, nil
}

// UpdatePodCondition updates an existing pod condition or creates a new one. Sets
// LastTransitionTime to now if the status has changed.
// Returns true if the pod condition has changed or has been added.
func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool {
	condition.LastTransitionTime = metav1.Now()
	// Try to find this pod condition.
	conditionIndex, oldCondition := GetPodCondition(status, condition.Type)

	if oldCondition == nil {
		// We are adding a new pod condition.
		status.Conditions = append(status.Conditions, *condition)
		return true
	} else {
		// We are updating an existing condition, so we need to check if it has changed.
		if condition.Status == oldCondition.Status {
			condition.LastTransitionTime = oldCondition.LastTransitionTime
		}

		isEqual := condition.Status == oldCondition.Status &&
			condition.Reason == oldCondition.Reason &&
			condition.Message == oldCondition.Message &&
			condition.LastProbeTime.Equal(&oldCondition.LastProbeTime) &&
			condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime)

		status.Conditions[conditionIndex] = *condition
		// Return true if one of the fields has changed.
		return !isEqual
	}
}
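Finally, UpdatePodCondition is what a provider mock would call when flipping pod status; note that LastTransitionTime only advances when the condition's Status value actually changes. A sketch (illustrative only, not part of this diff; the reason and message strings are made up):

package main

import (
	"k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// markPodReady flips the pod's Ready condition to true and reports
// whether anything actually changed.
func markPodReady(pod *v1.Pod) bool {
	return podutil.UpdatePodCondition(&pod.Status, &v1.PodCondition{
		Type:    v1.PodReady,
		Status:  v1.ConditionTrue,
		Reason:  "MockProviderReady",        // hypothetical reason
		Message: "all containers are ready", // hypothetical message
	})
}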