[Sync Provider] Fix panic on not found pod status
node/sync.go (12 changed lines)
@@ -152,12 +152,14 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubern
 	defer span.End()
 	ctx = addPodAttributes(ctx, span, podFromKubernetes)
 
+	var statusErr error
 	podStatus, err := p.PodLifecycleHandler.GetPodStatus(ctx, podFromKubernetes.Namespace, podFromKubernetes.Name)
 	if err != nil {
 		if !errdefs.IsNotFound(err) {
 			span.SetStatus(err)
 			return err
 		}
+		statusErr = err
 	}
 	if podStatus != nil {
 		pod := podFromKubernetes.DeepCopy()
@@ -168,6 +170,7 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubern
 
 	key, err := cache.MetaNamespaceKeyFunc(podFromKubernetes)
 	if err != nil {
+		span.SetStatus(err)
 		return err
 	}
 
@@ -176,9 +179,13 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubern
 		return nil
 	}
 
+	if podFromKubernetes.Status.Phase != corev1.PodRunning && time.Since(podFromKubernetes.ObjectMeta.CreationTimestamp.Time) <= time.Minute {
+		span.SetStatus(statusErr)
+		return statusErr
+	}
+
 	// Only change the status when the pod was already up.
 	// Only doing so when the pod was successfully running makes sure we don't run into race conditions during pod creation.
-	if podFromKubernetes.Status.Phase == corev1.PodRunning || time.Since(podFromKubernetes.ObjectMeta.CreationTimestamp.Time) > time.Minute {
 	// Set the pod to failed, this makes sure if the underlying container implementation is gone that a new pod will be created.
 	podStatus = podFromKubernetes.Status.DeepCopy()
 	podStatus.Phase = corev1.PodFailed
@@ -199,9 +206,8 @@ func (p *syncProviderWrapper) updatePodStatus(ctx context.Context, podFromKubern
 		}
 		podStatus.ContainerStatuses[i].State.Running = nil
 	}
-	log.G(ctx).Debug("Setting pod not found on pod status")
-	}
 
+	log.G(ctx).Debug("Setting pod not found on pod status")
 	pod := podFromKubernetes.DeepCopy()
 	podStatus.DeepCopyInto(&pod.Status)
 	p.notify(pod)
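For orientation, below is a minimal, self-contained Go sketch of the control flow this change introduces. It is not the patched file itself: podInfo, getPodStatus, and reconcile are hypothetical stand-ins for the pod object, PodLifecycleHandler.GetPodStatus, and updatePodStatus, and a plain errors.Is check replaces errdefs.IsNotFound. The point it illustrates is that a not-found status is now remembered and returned for a young, not-yet-running pod, while an already-running (or older) pod still falls through to the mark-as-Failed path.

// sketch.go: simplified illustration of the patched error path, not the real code.
package main

import (
	"errors"
	"fmt"
	"time"
)

// errNotFound stands in for the provider's "pod status not found" error
// (detected via errdefs.IsNotFound in the patched code).
var errNotFound = errors.New("pod status not found")

// podInfo carries only the fields the new guard depends on.
type podInfo struct {
	running   bool      // stands in for Status.Phase == corev1.PodRunning
	createdAt time.Time // stands in for ObjectMeta.CreationTimestamp
}

// getPodStatus stands in for PodLifecycleHandler.GetPodStatus.
func getPodStatus(foundInProvider bool) (*string, error) {
	if !foundInProvider {
		return nil, errNotFound
	}
	s := "Running"
	return &s, nil
}

// reconcile mirrors the patched updatePodStatus control flow.
func reconcile(p podInfo, foundInProvider bool) error {
	var statusErr error
	status, err := getPodStatus(foundInProvider)
	if err != nil {
		if !errors.Is(err, errNotFound) {
			return err // unexpected errors still abort immediately
		}
		statusErr = err // keep the not-found error so it can be returned below
	}
	if status != nil {
		fmt.Println("provider reported status:", *status)
		return nil
	}

	// New guard from this commit: a pod that never reached Running and is
	// less than a minute old is probably still being created, so return the
	// not-found error instead of force-marking the pod Failed.
	if !p.running && time.Since(p.createdAt) <= time.Minute {
		return statusErr
	}

	// Otherwise the pod was already up (or is old enough): treat the missing
	// status as a gone workload and mark the pod Failed so it gets recreated.
	fmt.Println("marking pod Failed: status not found in provider")
	return nil
}

func main() {
	young := podInfo{running: false, createdAt: time.Now()}
	fmt.Println("young pod:", reconcile(young, false)) // returns the not-found error

	old := podInfo{running: true, createdAt: time.Now().Add(-5 * time.Minute)}
	fmt.Println("old pod:", reconcile(old, false)) // marks Failed, returns nil
}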