Refactor http server stuff (#466)

* Don't start things in New

* Move http server handling up to daemon.

This removes the burden of dealing with listeners, HTTP servers, etc. in
the core framework.

Instead, provide helpers to attach the appropriate routes to the
caller's serve mux.

With this change, the vkubelet package only helps callers set up HTTP
rather than forcing a specific HTTP configuration on them.
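
To make the new split concrete, here is a rough sketch of what a caller (such as the virtual-kubelet daemon) might now do: build its own mux, attach the routes, own the listener and http.Server, and only then construct and run the vkubelet server. The `AttachPodRoutes` signature, the import path, the address, and the cert/key paths below are assumptions for illustration, not necessarily the exact API introduced by this change.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/virtual-kubelet/virtual-kubelet/vkubelet"
)

func main() {
	ctx := context.Background()

	// In practice the daemon fills this in from flags/config.
	var cfg vkubelet.Config

	// The caller now owns the mux, listener, and http.Server; the
	// (provider, mux) signature for AttachPodRoutes is assumed here.
	mux := http.NewServeMux()
	vkubelet.AttachPodRoutes(cfg.Provider, mux)

	apiSrv := &http.Server{Addr: "0.0.0.0:10250", Handler: mux}
	go func() {
		// cert.pem and key.pem are placeholder paths.
		if err := apiSrv.ListenAndServeTLS("cert.pem", "key.pem"); err != nil {
			log.Fatal(err)
		}
	}()

	// New only constructs the server; Run registers the node and runs
	// the pod controller, blocking until the context is cancelled.
	s := vkubelet.New(cfg)
	if err := s.Run(ctx); err != nil {
		log.Fatal(err)
	}
}
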
Author: Brian Goff
Date: 2018-12-21 11:45:07 -08:00
Committer: GitHub
Parent: f6bc338033
Commit: 0d14914e85
4 changed files with 206 additions and 104 deletions

@@ -2,16 +2,13 @@ package vkubelet
import (
"context"
"net"
"time"
pkgerrors "github.com/pkg/errors"
"go.opencensus.io/trace"
corev1 "k8s.io/api/core/v1"
corev1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"github.com/virtual-kubelet/virtual-kubelet/log"
"github.com/virtual-kubelet/virtual-kubelet/manager"
"github.com/virtual-kubelet/virtual-kubelet/providers"
)
@@ -34,9 +31,7 @@ type Server struct {
// Config is used to configure a new server.
type Config struct {
	APIConfig APIConfig
	Client *kubernetes.Clientset
	MetricsAddr string
	Namespace string
	NodeName string
	Provider providers.Provider
@@ -46,16 +41,13 @@ type Config struct {
	PodInformer corev1informers.PodInformer
}

// APIConfig is used to configure the API server of the virtual kubelet.
type APIConfig struct {
	CertPath string
	KeyPath string
	Addr string
}

// New creates a new virtual-kubelet server.
func New(ctx context.Context, cfg Config) (s *Server, retErr error) {
	s = &Server{
// This is the entrypoint to this package.
//
// This creates but does not start the server.
// You must call `Run` on the returned object to start the server.
func New(cfg Config) *Server {
	return &Server{
		namespace: cfg.Namespace,
		nodeName: cfg.NodeName,
		taint: cfg.Taint,
@@ -65,61 +57,43 @@ func New(ctx context.Context, cfg Config) (s *Server, retErr error) {
		podSyncWorkers: cfg.PodSyncWorkers,
		podInformer: cfg.PodInformer,
	}
	ctx = log.WithLogger(ctx, log.G(ctx))
	apiL, err := net.Listen("tcp", cfg.APIConfig.Addr)
	if err != nil {
		return nil, pkgerrors.Wrap(err, "error setting up API listener")
	}
	defer func() {
		if retErr != nil {
			apiL.Close()
		}
	}()
	go KubeletServerStart(cfg.Provider, apiL, cfg.APIConfig.CertPath, cfg.APIConfig.KeyPath)
	if cfg.MetricsAddr != "" {
		metricsL, err := net.Listen("tcp", cfg.MetricsAddr)
		if err != nil {
			return nil, pkgerrors.Wrap(err, "error setting up metrics listener")
		}
		defer func() {
			if retErr != nil {
				metricsL.Close()
			}
		}()
		go MetricsServerStart(cfg.Provider, metricsL)
	} else {
		log.G(ctx).Info("Skipping metrics server startup since no address was provided")
	}
	if err := s.registerNode(ctx); err != nil {
		return s, err
	}
	go func() {
		tick := time.NewTicker(5 * time.Second)
		defer tick.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-tick.C:
				ctx, span := trace.StartSpan(ctx, "syncActualState")
				s.updateNode(ctx)
				s.updatePodStatuses(ctx)
				span.End()
			}
		}
	}()
	return s, nil
}

// Run creates and starts an instance of the pod controller, blocking until it stops.
//
// Note that this does not set up the HTTP routes that are used to expose pod
// info to the Kubernetes API Server, such as logs, metrics, exec, etc.
// See `AttachPodRoutes` and `AttachMetricsRoutes` to set these up.
func (s *Server) Run(ctx context.Context) error {
	if err := s.registerNode(ctx); err != nil {
		return err
	}
	go s.providerSyncLoop(ctx)
	return NewPodController(s).Run(ctx, s.podSyncWorkers)
}

// providerSyncLoop synchronizes pod states from the provider back to Kubernetes.
func (s *Server) providerSyncLoop(ctx context.Context) {
	// TODO(@cpuguy83): Ticker does not seem like the right thing to use here. A
	// ticker keeps ticking while we are updating state, which can be a long
	// operation. This would lead to just a constant re-sync rather than sleeping
	// for 5 seconds between each loop.
	//
	// Leaving this note here as fixing is out of scope for my current changeset.
	tick := time.NewTicker(5 * time.Second)
	defer tick.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-tick.C:
			ctx, span := trace.StartSpan(ctx, "syncActualState")
			s.updateNode(ctx)
			s.updatePodStatuses(ctx)
			span.End()
		}
	}
}
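
As an aside, the TODO above describes a loop in which the 5-second wait only begins after each sync completes. A minimal sketch of that shape, reusing the same receiver and imports as providerSyncLoop, might look like the following; this is illustration only, the helper name is hypothetical, and it is not part of this commit.

// providerSyncLoopWithDelay is a hypothetical variant of providerSyncLoop
// illustrating the TODO above: time.After is re-armed on each iteration, so
// the 5-second wait starts only after the previous sync has finished.
func (s *Server) providerSyncLoopWithDelay(ctx context.Context) {
	const interval = 5 * time.Second
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(interval):
			ctx, span := trace.StartSpan(ctx, "syncActualState")
			s.updateNode(ctx)
			s.updatePodStatuses(ctx)
			span.End()
		}
	}
}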