Use standard logging package (#323)
@@ -1,24 +1,36 @@
 package vkubelet
 
 import (
+    "context"
     "fmt"
     "io"
-    "log"
     "net/http"
     "os"
     "strconv"
     "strings"
     "time"
 
+    "github.com/Sirupsen/logrus"
     "github.com/gorilla/mux"
+    "github.com/virtual-kubelet/virtual-kubelet/log"
     "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
 )
 
 var p Provider
 var r mux.Router
 
+func loggingContext(r *http.Request) context.Context {
+    ctx := r.Context()
+    logger := log.G(ctx).WithFields(logrus.Fields{
+        "uri":  r.RequestURI,
+        "vars": mux.Vars(r),
+    })
+    return log.WithLogger(ctx, logger)
+}
+
 func NotFound(w http.ResponseWriter, r *http.Request) {
-    log.Printf("404 request not found. \n %v", mux.Vars(r))
+    logger := log.G(loggingContext(r))
+    log.Trace(logger, "404 request not found")
     http.Error(w, "404 request not found", http.StatusNotFound)
 }
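The log.G / log.WithLogger pair introduced above is the context-scoped logger pattern: WithLogger stashes a request-scoped *logrus.Entry in the context.Context, and log.G retrieves it wherever the context travels, so fields like uri and vars ride along automatically. (log.Trace appears to be a helper that emits through the given entry; its body is not shown in this diff.) A minimal sketch of how such helpers are typically built — the key type and the fallback to a default logger are assumptions here, not the actual virtual-kubelet implementation:

    package log

    import (
        "context"

        "github.com/Sirupsen/logrus"
    )

    // loggerKey is an unexported key type so no other package can
    // collide with (or overwrite) the context entry.
    type loggerKey struct{}

    // WithLogger returns a copy of ctx that carries logger.
    func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
        return context.WithValue(ctx, loggerKey{}, logger)
    }

    // G returns the logger stored in ctx, falling back to a default
    // entry when the context carries none.
    func G(ctx context.Context) *logrus.Entry {
        if logger, ok := ctx.Value(loggerKey{}).(*logrus.Entry); ok {
            return logger
        }
        return logrus.NewEntry(logrus.StandardLogger())
    }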
@@ -35,37 +47,45 @@ func ApiserverStart(provider Provider) {
 	r.NotFoundHandler = http.HandlerFunc(NotFound)
 
 	if err := http.ListenAndServeTLS(addr, certFilePath, keyFilePath, r); err != nil {
-		log.Println(err)
+		log.G(context.TODO()).WithError(err).Error("error setting up http server")
 	}
 }
 
 func ApiServerHandler(w http.ResponseWriter, req *http.Request) {
 	vars := mux.Vars(req)
-	if len(vars) == 3 {
-		namespace := vars["namespace"]
-		pod := vars["pod"]
-		container := vars["container"]
-		tail := 10
-		q := req.URL.Query()
-		queryTail := q.Get("tailLines")
-		if queryTail != "" {
-			t, err := strconv.Atoi(queryTail)
-			if err != nil {
-				log.Println(err)
-				io.WriteString(w, err.Error())
-			} else {
-				tail = t
-			}
-		}
-		podsLogs, err := p.GetContainerLogs(namespace, pod, container, tail)
-		if err != nil {
-			log.Println(err)
-			io.WriteString(w, err.Error())
-		} else {
-			io.WriteString(w, podsLogs)
-		}
-	} else {
+	if len(vars) != 3 {
 		NotFound(w, req)
+		return
 	}
+
+	ctx := loggingContext(req)
+
+	namespace := vars["namespace"]
+	pod := vars["pod"]
+	container := vars["container"]
+	tail := 10
+	q := req.URL.Query()
+
+	if queryTail := q.Get("tailLines"); queryTail != "" {
+		t, err := strconv.Atoi(queryTail)
+		if err != nil {
+			logger := log.G(context.TODO()).WithError(err)
+			log.Trace(logger, "could not parse tailLines")
+			http.Error(w, fmt.Sprintf("could not parse \"tailLines\": %v", err), http.StatusBadRequest)
+			return
+		}
+		tail = t
+	}
+
+	podsLogs, err := p.GetContainerLogs(namespace, pod, container, tail)
+	if err != nil {
+		log.G(ctx).WithError(err).Error("error getting container logs")
+		http.Error(w, fmt.Sprintf("error while getting container logs: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+	if _, err := io.WriteString(w, podsLogs); err != nil {
+		log.G(ctx).WithError(err).Warn("error writing response to client")
+	}
 }
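ApiServerHandler now validates up front and returns early, mapping each failure to a proper status code (400 for an unparseable tailLines, 500 for a provider failure) instead of writing raw error text with an implicit 200. The same guard-clause shape in isolation — the handler and route below are illustrative, not part of the commit:

    package main

    import (
        "fmt"
        "net/http"
        "strconv"
    )

    func logsHandler(w http.ResponseWriter, r *http.Request) {
        tail := 10 // default when tailLines is absent
        if raw := r.URL.Query().Get("tailLines"); raw != "" {
            t, err := strconv.Atoi(raw)
            if err != nil {
                // Client error: reject with 400 and stop; no fall-through.
                http.Error(w, fmt.Sprintf("could not parse %q: %v", raw, err), http.StatusBadRequest)
                return
            }
            tail = t
        }
        fmt.Fprintf(w, "would return %d lines\n", tail)
    }

    func main() {
        http.HandleFunc("/logs", logsHandler)
        http.ListenAndServe(":8080", nil)
    }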
@@ -3,8 +3,7 @@
 package vkubelet
 
 import (
-    "fmt"
 
     "github.com/pkg/errors"
     "github.com/virtual-kubelet/virtual-kubelet/manager"
     "github.com/virtual-kubelet/virtual-kubelet/providers/aws"
     "github.com/virtual-kubelet/virtual-kubelet/providers/azure"
@@ -56,8 +55,6 @@ func lookupProvider(provider, providerConfig string, rm *manager.ResourceManager
     case "vic":
         return vic.NewVicProvider(providerConfig, rm, nodeName, operatingSystem)
     default:
-        fmt.Printf("Provider '%s' is not supported\n", provider)
+        return nil, errors.New("provider not supported")
     }
-    var p Provider
-    return p, nil
 }
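The default case now returns an error to the caller rather than printing to stdout and falling through to `var p Provider; return p, nil`, which handed back a nil provider with a nil error and deferred the crash to the first use. The control-flow change in isolation (names are illustrative):

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    // Provider stands in for the real provider interface.
    type Provider interface{}

    func lookupProvider(name string) (Provider, error) {
        switch name {
        case "mock":
            return struct{}{}, nil
        default:
            // Fail loudly at the lookup site instead of returning (nil, nil).
            return nil, errors.Errorf("provider %q is not supported", name)
        }
    }

    func main() {
        if _, err := lookupProvider("unknown"); err != nil {
            fmt.Println(err)
        }
    }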
@@ -1,8 +1,7 @@
 package vkubelet
 
 import (
-    "fmt"
 
     "github.com/pkg/errors"
     "github.com/virtual-kubelet/virtual-kubelet/manager"
     "github.com/virtual-kubelet/virtual-kubelet/providers/aws"
     "github.com/virtual-kubelet/virtual-kubelet/providers/azure"
@@ -43,8 +42,6 @@ func lookupProvider(provider, providerConfig string, rm *manager.ResourceManager
     case "sfmesh":
         return sfmesh.NewSFMeshProvider(rm, nodeName, operatingSystem, internalIP, daemonEndpointPort)
     default:
-        fmt.Printf("Provider '%s' is not supported\n", provider)
+        return nil, errors.New("provider is not supported")
     }
-    var p Provider
-    return p, nil
 }
@@ -1,8 +1,7 @@
 package vkubelet
 
 import (
-    "fmt"
 
     "github.com/pkg/errors"
     "github.com/virtual-kubelet/virtual-kubelet/manager"
     "github.com/virtual-kubelet/virtual-kubelet/providers/aws"
     "github.com/virtual-kubelet/virtual-kubelet/providers/azure"
@@ -43,8 +42,6 @@ func lookupProvider(provider, providerConfig string, rm *manager.ResourceManager
     case "sfmesh":
         return sfmesh.NewSFMeshProvider(rm, nodeName, operatingSystem, internalIP, daemonEndpointPort)
     default:
-        fmt.Printf("Provider '%s' is not supported\n", provider)
+        return nil, errors.New("provider not supported")
     }
-    var p Provider
-    return p, nil
 }
@@ -1,8 +1,8 @@
 package vkubelet
 
 import (
+    "context"
     "fmt"
-    "log"
     "os"
     "os/signal"
     "strconv"
@@ -10,6 +10,8 @@ import (
     "syscall"
     "time"
 
+    pkgerrors "github.com/pkg/errors"
+    "github.com/virtual-kubelet/virtual-kubelet/log"
     "github.com/virtual-kubelet/virtual-kubelet/manager"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/errors"
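Because this file already imports k8s.io/apimachinery/pkg/api/errors under the plain name errors, github.com/pkg/errors comes in under the alias pkgerrors; the two do different jobs and coexist cleanly. A compressed illustration (the function is assumed, not from the commit):

    package vkubelet

    import (
        pkgerrors "github.com/pkg/errors"    // error wrapping
        "k8s.io/apimachinery/pkg/api/errors" // API error predicates
    )

    func ignoreNotFound(err error) error {
        if errors.IsNotFound(err) { // apimachinery predicate
            return nil
        }
        return pkgerrors.Wrap(err, "unexpected API error") // pkg/errors wrap
    }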
@@ -69,7 +71,10 @@ func New(nodeName, operatingSystem, namespace, kubeConfig, provider, providerCon
         return nil, err
     }
 
-    rm := manager.NewResourceManager(clientset)
+    rm, err := manager.NewResourceManager(clientset)
+    if err != nil {
+        return nil, pkgerrors.Wrap(err, "error creating resource manager")
+    }
 
     daemonEndpointPortEnv := os.Getenv("KUBELET_PORT")
     if daemonEndpointPortEnv == "" {
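pkgerrors.Wrap keeps the original failure as the cause while prefixing what the caller was doing, so messages compose as "error creating resource manager: <cause>" and the root error stays retrievable. A self-contained illustration (the failing call is a stand-in):

    package main

    import (
        "fmt"

        pkgerrors "github.com/pkg/errors"
    )

    func newResourceManager() error {
        return pkgerrors.New("cache sync failed") // stand-in failure
    }

    func setup() error {
        if err := newResourceManager(); err != nil {
            return pkgerrors.Wrap(err, "error creating resource manager")
        }
        return nil
    }

    func main() {
        err := setup()
        fmt.Println(err)                  // error creating resource manager: cache sync failed
        fmt.Println(pkgerrors.Cause(err)) // cache sync failed
    }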
@@ -101,7 +106,7 @@ func New(nodeName, operatingSystem, namespace, kubeConfig, provider, providerCon
     case "PreferNoSchedule":
         vkTaintEffect = corev1.TaintEffectPreferNoSchedule
     default:
-        fmt.Printf("Taint effect '%s' is not supported\n", vkTaintEffectEnv)
+        return nil, pkgerrors.Errorf("taint effect %q is not supported", vkTaintEffectEnv)
     }
 
     taint := corev1.Taint{
@@ -125,17 +130,21 @@ func New(nodeName, operatingSystem, namespace, kubeConfig, provider, providerCon
         provider: p,
     }
 
-    if err = s.registerNode(); err != nil {
+    ctx := context.TODO()
+    ctx = log.WithLogger(ctx, log.G(ctx))
+
+    if err = s.registerNode(ctx); err != nil {
         return s, err
     }
 
     go ApiserverStart(p)
 
     tick := time.Tick(5 * time.Second)
 
     go func() {
         for range tick {
-            s.updateNode()
-            s.updatePodStatuses()
+            s.updateNode(ctx)
+            s.updatePodStatuses(ctx)
         }
     }()
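Creating one ctx in New and threading it into registerNode and the 5-second update loop gives every downstream call the same logger scope, and leaves room to swap context.TODO() for a cancellable context later. A runnable sketch of the same wiring — the server type and method bodies below are stand-ins, not the real Server:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    type server struct{}

    func (s *server) updateNode(ctx context.Context)        { fmt.Println("update node") }
    func (s *server) updatePodStatuses(ctx context.Context) { fmt.Println("update pod statuses") }

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        s := &server{}
        tick := time.NewTicker(100 * time.Millisecond)
        defer tick.Stop()

        go func() {
            for {
                select {
                case <-ctx.Done(): // the cancellation point ctx plumbing enables
                    return
                case <-tick.C:
                    s.updateNode(ctx)
                    s.updatePodStatuses(ctx)
                }
            }
        }()

        time.Sleep(300 * time.Millisecond)
    }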
@@ -143,7 +152,7 @@ func New(nodeName, operatingSystem, namespace, kubeConfig, provider, providerCon
 }
 
 // registerNode registers this virtual node with the Kubernetes API.
-func (s *Server) registerNode() error {
+func (s *Server) registerNode(ctx context.Context) error {
     taints := make([]corev1.Taint, 0)
 
     if !s.disableTaint {
@@ -182,14 +191,14 @@ func (s *Server) registerNode() error {
         return err
     }
 
-    log.Printf("Node '%s' with OS type '%s' registered\n", node.Name, node.Status.NodeInfo.OperatingSystem)
+    log.G(ctx).Info("Registered node")
 
     return nil
 }
 
 // Run starts the server, registers it with Kubernetes and begins watching/reconciling the cluster.
 // Run will block until Stop is called or a SIGINT or SIGTERM signal is received.
-func (s *Server) Run() error {
+func (s *Server) Run(ctx context.Context) error {
     shouldStop := false
 
     sig := make(chan os.Signal, 1)
@@ -207,15 +216,15 @@ func (s *Server) Run() error {
 
     pods, err := s.k8sClient.CoreV1().Pods(s.namespace).List(opts)
     if err != nil {
-        log.Fatal("Failed to list pods", err)
+        return pkgerrors.Wrap(err, "error getting pod list")
     }
     s.resourceManager.SetPods(pods)
-    s.reconcile()
+    s.reconcile(ctx)
 
     opts.ResourceVersion = pods.ResourceVersion
     s.podWatcher, err = s.k8sClient.CoreV1().Pods(s.namespace).Watch(opts)
     if err != nil {
-        log.Fatal("Failed to watch pods", err)
+        return pkgerrors.Wrap(err, "failed to watch pods")
     }
 
 loop:
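Replacing log.Fatal with a returned, wrapped error matters because log.Fatal calls os.Exit immediately: deferred cleanup never runs and the caller of Run gets no say. Returning the error moves the exit decision to a single place at the top. In miniature (names illustrative):

    package main

    import (
        "fmt"
        "log"
        "os"
    )

    func listPods() error { return fmt.Errorf("connection refused") }

    func run() error {
        defer fmt.Println("cleanup still runs") // would be skipped by log.Fatal
        if err := listPods(); err != nil {
            return fmt.Errorf("error getting pod list: %v", err)
        }
        return nil
    }

    func main() {
        if err := run(); err != nil {
            log.Println(err)
            os.Exit(1)
        }
    }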
@@ -224,15 +233,15 @@ func (s *Server) Run() error {
         case ev, ok := <-s.podWatcher.ResultChan():
             if !ok {
                 if shouldStop {
-                    log.Println("Pod watcher is stopped.")
+                    log.G(ctx).Info("Pod watcher is stopped")
                     return nil
                 }
 
-                log.Println("Pod watcher connection is closed unexpectedly.")
+                log.G(ctx).Error("Pod watcher connection is closed unexpectedly")
                 break loop
             }
 
-            log.Println("Pod watcher event is received:", ev.Type)
+            log.G(ctx).WithField("type", ev.Type).Debug("Pod watcher event is received")
             reconcile := false
             switch ev.Type {
             case watch.Added:
@@ -244,7 +253,7 @@ func (s *Server) Run() error {
             }
 
             if reconcile {
-                s.reconcile()
+                s.reconcile(ctx)
             }
         }
     }
@@ -262,17 +271,17 @@ func (s *Server) Stop() {
 }
 
 // updateNode updates the node status within Kubernetes with updated NodeConditions.
-func (s *Server) updateNode() {
+func (s *Server) updateNode(ctx context.Context) {
     opts := metav1.GetOptions{}
     n, err := s.k8sClient.CoreV1().Nodes().Get(s.nodeName, opts)
     if err != nil && !errors.IsNotFound(err) {
-        log.Println("Failed to retrieve node:", err)
+        log.G(ctx).WithError(err).Error("Failed to retrieve node")
         return
     }
 
     if errors.IsNotFound(err) {
-        if err = s.registerNode(); err != nil {
-            log.Println("Failed to register node:", err)
+        if err = s.registerNode(ctx); err != nil {
+            log.G(ctx).WithError(err).Error("Failed to register node")
             return
         }
     }
@@ -288,27 +297,31 @@ func (s *Server) updateNode() {
 
     n, err = s.k8sClient.CoreV1().Nodes().UpdateStatus(n)
     if err != nil {
-        log.Println("Failed to update node:", err)
+        log.G(ctx).WithError(err).Error("Failed to update node")
         return
     }
 }
 
 // reconcile is the main reconciliation loop that compares differences between Kubernetes and
 // the active provider and reconciles the differences.
-func (s *Server) reconcile() {
-    log.Println("Start reconcile.")
+func (s *Server) reconcile(ctx context.Context) {
+    logger := log.G(ctx)
+    logger.Debug("Start reconcile")
+    defer logger.Debug("End reconcile")
 
     providerPods, err := s.provider.GetPods()
     if err != nil {
-        log.Println(err)
+        logger.WithError(err).Error("Error getting pod list from provider")
         return
     }
 
     for _, pod := range providerPods {
         // Delete pods that don't exist in Kubernetes
         if p := s.resourceManager.GetPod(pod.Namespace, pod.Name); p == nil || p.DeletionTimestamp != nil {
-            log.Printf("Deleting pod '%s'\n", pod.Name)
-            if err := s.deletePod(pod); err != nil {
-                log.Printf("Error deleting pod '%s': %s\n", pod.Name, err)
+            logger := logger.WithField("pod", pod.Name)
+            logger.Debug("Deleting pod")
+            if err := s.deletePod(ctx, pod); err != nil {
+                logger.WithError(err).Error("Error deleting pod")
                 continue
             }
         }
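Inside the loop, logger := logger.WithField("pod", pod.Name) deliberately shadows the outer logger: WithField returns a new *logrus.Entry carrying the extra field, so every line logged in that iteration is tagged with the pod name while the outer logger stays untouched. In isolation:

    package main

    import "github.com/Sirupsen/logrus"

    func main() {
        logger := logrus.NewEntry(logrus.StandardLogger())

        for _, pod := range []string{"nginx", "redis"} {
            // Shadowed copy; the outer logger is unchanged after the loop.
            logger := logger.WithField("pod", pod)
            logger.Info("reconciling pod")
        }
    }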
@@ -317,6 +330,7 @@ func (s *Server) reconcile() {
     // Create any pods for k8s pods that don't exist in the provider
     pods := s.resourceManager.GetPods()
     for _, pod := range pods {
+        logger := logger.WithField("pod", pod.Name)
         var providerPod *corev1.Pod
         for _, p := range providerPods {
             if p.Namespace == pod.Namespace && p.Name == pod.Name {
@@ -326,30 +340,33 @@ func (s *Server) reconcile() {
         }
 
         if pod.DeletionTimestamp == nil && pod.Status.Phase != corev1.PodFailed && providerPod == nil {
-            log.Printf("Creating pod '%s'\n", pod.Name)
-            if err := s.createPod(pod); err != nil {
-                log.Printf("Error creating pod '%s': %s\n", pod.Name, err)
+            logger.Debug("Creating pod")
+            if err := s.createPod(ctx, pod); err != nil {
+                logger.WithError(err).Error("Error creating pod")
                 continue
             }
         }
 
         // Delete pod if DeletionTimestamp is set
         if pod.DeletionTimestamp != nil {
-            log.Printf("Pod '%s' is pending deletion.\n", pod.Name)
+            log.Trace(logger, "Pod pending deletion")
             var err error
-            if err = s.deletePod(pod); err != nil {
-                log.Printf("Error deleting pod '%s': %s\n", pod.Name, err)
+            if err = s.deletePod(ctx, pod); err != nil {
+                logger.WithError(err).Error("Error deleting pod")
                 continue
             }
+            log.Trace(logger, "Pod deletion complete")
         }
     }
 }
 
-func (s *Server) createPod(pod *corev1.Pod) error {
+func (s *Server) createPod(ctx context.Context, pod *corev1.Pod) error {
     if err := s.populateSecretsAndConfigMapsInEnv(pod); err != nil {
         return err
     }
 
+    logger := log.G(ctx).WithField("pod", pod.Name)
+
     if origErr := s.provider.CreatePod(pod); origErr != nil {
         pod.ResourceVersion = "" // Blank out resource version to prevent object has been modified error
         pod.Status.Phase = corev1.PodFailed
@@ -358,29 +375,29 @@ func (s *Server) createPod(pod *corev1.Pod) error {
 
         _, err := s.k8sClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
         if err != nil {
-            log.Println("Failed to update pod status:", err)
-            return origErr
+            logger.WithError(err).Warn("Failed to update pod status")
         }
 
+        return origErr
     }
 
-    log.Printf("Pod '%s' created.\n", pod.Name)
+    logger.Info("Pod created")
 
     return nil
 }
 
-func (s *Server) deletePod(pod *corev1.Pod) error {
+func (s *Server) deletePod(ctx context.Context, pod *corev1.Pod) error {
     var delErr error
     if delErr = s.provider.DeletePod(pod); delErr != nil && errors.IsNotFound(delErr) {
         return delErr
     }
 
+    logger := log.G(ctx).WithField("pod", pod.Name)
     if !errors.IsNotFound(delErr) {
         var grace int64
         if err := s.k8sClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &grace}); err != nil && errors.IsNotFound(err) {
             if errors.IsNotFound(err) {
-                log.Printf("Pod '%s' doesn't exist.\n", pod.Name)
+                logger.Error("Pod doesn't exist")
                 return nil
             }
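Moving return origErr out of the inner if guarantees createPod always reports the provider's failure; a secondary UpdateStatus error is downgraded to a warning instead of masking (or being masked by) the root cause, where the old arrangement only returned origErr when the status update also failed. The shape of the fix, reduced to essentials with stand-in functions:

    package main

    import (
        "errors"
        "fmt"
    )

    func createInProvider() error { return errors.New("provider rejected pod") }
    func updateStatus() error     { return errors.New("apiserver unreachable") }

    func create() error {
        if origErr := createInProvider(); origErr != nil {
            if err := updateStatus(); err != nil {
                fmt.Println("warn: failed to update pod status:", err)
            }
            return origErr // the root cause always wins
        }
        return nil
    }

    func main() {
        fmt.Println(create()) // provider rejected pod
    }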
@@ -388,15 +405,14 @@ func (s *Server) deletePod(pod *corev1.Pod) error {
         }
 
         s.resourceManager.DeletePod(pod)
 
-        log.Printf("Pod '%s' deleted.\n", pod.Name)
+        logger.Info("Pod deleted")
     }
 
     return nil
 }
 
 // updatePodStatuses syncs the providers pod status with the kubernetes pod status.
-func (s *Server) updatePodStatuses() {
+func (s *Server) updatePodStatuses(ctx context.Context) {
     // Update all the pods with the provider status.
     pods := s.resourceManager.GetPods()
     for _, pod := range pods {
@@ -406,7 +422,7 @@ func (s *Server) updatePodStatuses() {
 
         status, err := s.provider.GetPodStatus(pod.Namespace, pod.Name)
         if err != nil {
-            log.Printf("Error retrieving pod '%s' in namespace '%s' status from provider: %s\n", pod.Name, pod.Namespace, err)
+            log.G(ctx).WithField("pod", pod.Name).Error("Error retrieving pod status")
             return
         }