Apply suggestions from code review

Typos and punctuation fixes.

Co-Authored-By: Pires <1752631+pires@users.noreply.github.com>
Brian Goff
2019-10-24 07:42:21 -07:00
parent 4ee2c4d370
commit 31c8fbaa41
3 changed files with 16 additions and 16 deletions

View File

@@ -107,7 +107,7 @@ func TestPodLifecycle(t *testing.T) {
}
// isPodDeletedGracefullyFunc is a condition func that waits until the pod is in a terminal state, which is the VK's
-// action when the pod is deleted from the provider
+// action when the pod is deleted from the provider.
isPodDeletedGracefullyFunc := func(ctx context.Context, watcher watch.Interface) error {
_, watchErr := watchutils.UntilWithoutRetry(ctx, watcher, func(ev watch.Event) (bool, error) {
log.G(ctx).WithField("event", ev).Info("got event")
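For illustration only, not part of this commit: a condition func of the same shape as isPodDeletedGracefullyFunc, resolving once the pod reports a terminal phase. The name waitTerminal is invented; watchutils is assumed to be k8s.io/client-go/tools/watch, as the call to UntilWithoutRetry above suggests.

// waitTerminal is a hypothetical sketch of a condition func in the same
// style: it resolves once the watched pod reaches a terminal phase.
waitTerminal := func(ctx context.Context, watcher watch.Interface) error {
	_, watchErr := watchutils.UntilWithoutRetry(ctx, watcher, func(ev watch.Event) (bool, error) {
		pod, ok := ev.Object.(*corev1.Pod)
		if !ok {
			return false, nil // skip non-pod events
		}
		return pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed, nil
	})
	return watchErr
}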
@@ -130,7 +130,7 @@ func TestPodLifecycle(t *testing.T) {
t.Run(env, func(t *testing.T) {
// createStartDeleteScenario tests the basic flow of creating a pod, waiting for it to start, and deleting
-// it gracefully
+// it gracefully.
t.Run("createStartDeleteScenario", func(t *testing.T) {
assert.NilError(t, wireUpSystem(ctx, h(), func(ctx context.Context, s *system) {
testCreateStartDeleteScenario(ctx, t, s, isPodDeletedGracefullyFunc, true)
@@ -138,7 +138,7 @@ func TestPodLifecycle(t *testing.T) {
})
// createStartDeleteScenarioWithDeletionErrorNotFound tests the flow if the pod was not found in the provider
-// for some reason
+// for some reason.
t.Run("createStartDeleteScenarioWithDeletionErrorNotFound", func(t *testing.T) {
mp := h()
mp.setErrorOnDelete(errdefs.NotFound("not found"))
@@ -148,7 +148,7 @@ func TestPodLifecycle(t *testing.T) {
})
// createStartDeleteScenarioWithDeletionRandomError tests the flow if the pod was unable to be deleted in the
-// provider
+// provider.
t.Run("createStartDeleteScenarioWithDeletionRandomError", func(t *testing.T) {
mp := h()
deletionFunc := func(ctx context.Context, watcher watch.Interface) error {
@@ -186,7 +186,7 @@ func TestPodLifecycle(t *testing.T) {
})
})
-// failedPodScenario ensures that the VK ignores failed pods that were failed prior to the PC starting up
+// failedPodScenario ensures that the VK ignores failed pods that were failed prior to the pod controller starting up.
t.Run("failedPodScenario", func(t *testing.T) {
t.Run("mockProvider", func(t *testing.T) {
mp := newMockProvider()
@@ -196,7 +196,7 @@ func TestPodLifecycle(t *testing.T) {
})
})
-// succeededPodScenario ensures that the VK ignores succeeded pods that were succeeded prior to the PC starting up.
+// succeededPodScenario ensures that the VK ignores succeeded pods that were succeeded prior to the pod controller starting up.
t.Run("succeededPodScenario", func(t *testing.T) {
t.Run("mockProvider", func(t *testing.T) {
mp := newMockProvider()
@@ -207,7 +207,7 @@ func TestPodLifecycle(t *testing.T) {
})
// updatePodWhileRunningScenario updates a pod while the VK is running to ensure the update is propagated
-// to the provider
+// to the provider.
t.Run("updatePodWhileRunningScenario", func(t *testing.T) {
t.Run("mockProvider", func(t *testing.T) {
mp := newMockProvider()

View File

@@ -74,7 +74,7 @@ type PodLifecycleHandler interface {
GetPods(context.Context) ([]*corev1.Pod, error)
}
-// PodNotifier is used as an extenstion to PodLifecycleHandler to support async updates of pod statues.
+// PodNotifier is used as an extension to PodLifecycleHandler to support async updates of pod statuses.
type PodNotifier interface {
// NotifyPods instructs the notifier to call the passed in function when
// the pod status changes. It should be called when a pod's status changes.
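A hedged sketch of how a provider might satisfy this interface; asyncProvider and its field are invented for illustration, and only the NotifyPods shape comes from the comment above.

type asyncProvider struct {
	// notifyPod holds the controller-supplied callback.
	notifyPod func(*corev1.Pod)
}

// NotifyPods stores the callback; the provider then invokes it from its own
// event loop whenever a pod's status changes, e.g. p.notifyPod(updatedPod).
func (p *asyncProvider) NotifyPods(ctx context.Context, cb func(*corev1.Pod)) {
	p.notifyPod = cb
}

With a PodNotifier in place, the provider pushes status updates instead of relying on the polling wrapper shown in the third file of this commit.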
@@ -160,7 +160,7 @@ type PodControllerConfig struct {
ServiceInformer corev1informers.ServiceInformer
}
-// NewPodController creates a new pod controller from the provided config
+// NewPodController creates a new pod controller with the provided config.
func NewPodController(cfg PodControllerConfig) (*PodController, error) {
if cfg.PodClient == nil {
return nil, errdefs.InvalidInput("missing core client")
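A hedged construction sketch: apart from PodClient and ServiceInformer, which are visible in this diff, the package name node and the remaining field names are assumptions.

cfg := node.PodControllerConfig{
	PodClient:       client.CoreV1(), // nil here triggers the InvalidInput error above
	PodInformer:     podInformer,     // assumed field, by analogy with ServiceInformer
	ServiceInformer: serviceInformer,
	Provider:        provider,        // assumed field: the PodLifecycleHandler implementation
}
pc, err := node.NewPodController(cfg)
if err != nil {
	return err
}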

View File

@@ -26,19 +26,19 @@ const (
containerStatusTerminatedMessage = "Container was terminated. The exit code may not reflect the real exit code"
)
-// syncProvderWrapper wraps a PodLifecycleHandler to give it async-like pod status notification behavior
+// syncProviderWrapper wraps a PodLifecycleHandler to give it async-like pod status notification behavior.
type syncProviderWrapper struct {
PodLifecycleHandler
notify func(*corev1.Pod)
l corev1listers.PodLister
-// deletedPods makes sure we don't set the "NotFOund" status
+// deletedPods makes sure we don't set the "NotFound" status
// for pods which have been requested to be deleted.
-// This is needed for our loop which just grabs pod statues every 5 seconds
+// This is needed for our loop which just grabs pod statuses every 5 seconds.
deletedPods sync.Map
}
-// This is used to clean up keys for deleted pods after they have been fully deleted in the API server
+// This is used to clean up keys for deleted pods after they have been fully deleted in the API server.
type syncWrapper interface {
_deletePodKey(context.Context, string)
}
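To make the deletedPods bookkeeping concrete, a minimal sketch; the helper names are invented, and only the sync.Map field and the _deletePodKey cleanup hook appear in this diff.

// markDeleted records that deletion was requested for this pod key, so the
// 5-second status-sync loop will not flag the pod as NotFound.
func (p *syncProviderWrapper) markDeleted(key string) {
	p.deletedPods.Store(key, struct{}{})
}

// The status-sync loop can then skip pods whose deletion is in flight; the
// key is removed via _deletePodKey once the API server has fully deleted the pod.
func (p *syncProviderWrapper) isMarkedDeleted(key string) bool {
	_, ok := p.deletedPods.Load(key)
	return ok
}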
@@ -53,7 +53,7 @@ func (p *syncProviderWrapper) _deletePodKey(ctx context.Context, key string) {
}
func (p *syncProviderWrapper) DeletePod(ctx context.Context, pod *corev1.Pod) error {
-log.G(ctx).Debug("syncProviderWrappped.DeletePod")
+log.G(ctx).Debug("syncProviderWrapper.DeletePod")
key, err := cache.MetaNamespaceKeyFunc(pod)
if err != nil {
return err
@@ -148,7 +148,7 @@ func (p *syncProviderWrapper) syncPodStatuses(ctx context.Context) {
}
func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubernetes *corev1.Pod) error {
-ctx, span := trace.StartSpan(ctx, "syncProviderWrapper.getPodStatus")
+ctx, span := trace.StartSpan(ctx, "syncProviderWrapper.updatePodStatus")
defer span.End()
ctx = addPodAttributes(ctx, span, podFromKubernetes)
@@ -176,7 +176,7 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubernetes *corev1.Pod) error {
return nil
}
-// Only change the status when the pod was already up
+// Only change the status when the pod was already up.
// Only doing so when the pod was successfully running makes sure we don't run into race conditions during pod creation.
if podFromKubernetes.Status.Phase == corev1.PodRunning || time.Since(podFromKubernetes.ObjectMeta.CreationTimestamp.Time) > time.Minute {
// Set the pod to failed, this makes sure if the underlying container implementation is gone that a new pod will be created.
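Restated as a standalone predicate for clarity; the name is invented, but the logic is copied from the condition above.

// podWasUp reports whether the pod either reached Running or is old enough
// that a creation race can be ruled out, i.e. when it is safe to mark the
// pod Failed because the provider no longer knows about it.
func podWasUp(pod *corev1.Pod) bool {
	return pod.Status.Phase == corev1.PodRunning ||
		time.Since(pod.ObjectMeta.CreationTimestamp.Time) > time.Minute
}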