Do not close pod sync, use context cancel instead. (#402)
Closing the channel is racy and can lead to a panic on exit. Instead rely on context cancellation to know if workers should exit.
This commit is contained in:
@@ -104,11 +104,16 @@ func New(ctx context.Context, cfg Config) (s *Server, retErr error) {
|
||||
tick := time.Tick(5 * time.Second)
|
||||
|
||||
go func() {
|
||||
for range tick {
|
||||
ctx, span := trace.StartSpan(ctx, "syncActualState")
|
||||
s.updateNode(ctx)
|
||||
s.updatePodStatuses(ctx)
|
||||
span.End()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-tick:
|
||||
ctx, span := trace.StartSpan(ctx, "syncActualState")
|
||||
s.updateNode(ctx)
|
||||
s.updatePodStatuses(ctx)
|
||||
span.End()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -128,12 +133,6 @@ func (s *Server) Run(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop shutsdown the server.
|
||||
// It does not shutdown pods assigned to the virtual node.
|
||||
func (s *Server) Stop() {
|
||||
close(s.podCh)
|
||||
}
|
||||
|
||||
// reconcile is the main reconciliation loop that compares differences between Kubernetes and
|
||||
// the active provider and reconciles the differences.
|
||||
func (s *Server) reconcile(ctx context.Context) {
|
||||
|
||||
Reference in New Issue
Block a user