Fix race condition in tunnel server startup

Several places in the code used a 5-second retry loop to wait for
Runtime.Core to be set. This caused a race condition in which OnChange
handlers could be registered after the Wrangler shared informers had
already been started. When this happened, the handlers were never
called, because the informers they relied upon were never started.

Fix this by requiring anything that needs Runtime.Core to run from a
cluster controller startup hook that is guaranteed to be called before
the shared informers are started, instead of firing it off in a
goroutine that retries until Runtime.Core is set.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Author: Brad Davidson <brad.davidson@rancher.com>
Committed: 2023-04-20 22:28:57 +00:00 by Brad Davidson
Parent: 1ca035accc
Commit: c44d33d29b
3 changed files with 17 additions and 40 deletions
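The shape of the fix, in isolation: components stop polling for Runtime.Core and instead register a named startup hook that the cluster controller path runs before the shared informers are started. The code below is a minimal sketch of that ordering, not the real k3s types; only the ClusterControllerStarts name and the func(ctx) hook signature are taken from the diff, while runtime, registerHooks, and startClusterControllers are hypothetical stand-ins.

package main

import (
    "context"
    "fmt"
)

// Hypothetical stand-in for the k3s runtime config. Hooks are keyed by name,
// mirroring how the diff registers "tunnel-server" and
// "node-password-secret-cleanup".
type runtime struct {
    Core                    any // set late, once the core factory exists
    ClusterControllerStarts map[string]func(ctx context.Context)
}

// Components that need Runtime.Core no longer poll for it; they register a hook.
func registerHooks(rt *runtime) {
    rt.ClusterControllerStarts["tunnel-server"] = func(ctx context.Context) {
        fmt.Println("tunnel-server: adding OnChange handlers")
    }
    rt.ClusterControllerStarts["node-password-secret-cleanup"] = func(ctx context.Context) {
        fmt.Println("cleanup: deleting stale node password secret")
    }
}

// The cluster controller startup path guarantees ordering: every hook runs
// before the shared informers are started, so any handlers the hooks add are
// known to the informer factory when its caches start.
func startClusterControllers(ctx context.Context, rt *runtime, startInformers func(context.Context) error) error {
    for name, hook := range rt.ClusterControllerStarts {
        fmt.Println("running startup hook:", name)
        hook(ctx)
    }
    return startInformers(ctx)
}

func main() {
    rt := &runtime{ClusterControllerStarts: map[string]func(ctx context.Context){}}
    registerHooks(rt)
    _ = startClusterControllers(context.Background(), rt, func(context.Context) error {
        fmt.Println("starting shared informers")
        return nil
    })
}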


@@ -82,7 +82,7 @@ func (c *Cluster) start(ctx context.Context) error {
 	if _, err := os.Stat(resetFile); err == nil {
 		// Before removing reset file we need to delete the node passwd secret in case the node
 		// password from the previously restored snapshot differs from the current password on disk.
-		go c.deleteNodePasswdSecret(ctx)
+		c.config.Runtime.ClusterControllerStarts["node-password-secret-cleanup"] = c.deleteNodePasswdSecret
 		os.Remove(resetFile)
 	}
@@ -176,30 +176,13 @@ func (c *Cluster) setupEtcdProxy(ctx context.Context, etcdProxy etcd.Proxy) {
 // deleteNodePasswdSecret wipes out the node password secret after restoration
 func (c *Cluster) deleteNodePasswdSecret(ctx context.Context) {
-	t := time.NewTicker(5 * time.Second)
-	defer t.Stop()
-	for range t.C {
-		nodeName := os.Getenv("NODE_NAME")
-		if nodeName == "" {
-			logrus.Infof("waiting for node name to be set")
-			continue
-		}
-		// the core factory may not yet be initialized so we
-		// want to wait until it is so not to evoke a panic.
-		if c.config.Runtime.Core == nil {
-			logrus.Infof("runtime is not yet initialized")
-			continue
-		}
-		secretsClient := c.config.Runtime.Core.Core().V1().Secret()
-		if err := nodepassword.Delete(secretsClient, nodeName); err != nil {
-			if apierrors.IsNotFound(err) {
-				logrus.Debugf("node password secret is not found for node %s", nodeName)
-				return
-			}
-			logrus.Warnf("failed to delete old node password secret: %v", err)
-			continue
-		}
-		return
+	nodeName := os.Getenv("NODE_NAME")
+	secretsClient := c.config.Runtime.Core.Core().V1().Secret()
+	if err := nodepassword.Delete(secretsClient, nodeName); err != nil {
+		if apierrors.IsNotFound(err) {
+			logrus.Debugf("node password secret is not found for node %s", nodeName)
+			return
+		}
+		logrus.Warnf("failed to delete old node password secret: %v", err)
 	}
 }


@@ -10,7 +10,6 @@ import (
 	"strconv"
 	"strings"
 	"sync"
-	"time"
 
 	"github.com/k3s-io/k3s/pkg/daemons/config"
 	"github.com/k3s-io/k3s/pkg/daemons/control/proxy"
@@ -45,7 +44,7 @@ func setupTunnel(ctx context.Context, cfg *config.Control) (http.Handler, error)
 		server: remotedialer.New(authorizer, loggingErrorWriter),
 		egress: map[string]bool{},
 	}
-	go tunnel.watch(ctx)
+	cfg.Runtime.ClusterControllerStarts["tunnel-server"] = tunnel.watch
 	return tunnel, nil
 }
@@ -112,17 +111,10 @@ func (t *TunnelServer) watch(ctx context.Context) {
 		return
 	}
-	for {
-		if t.config.Runtime.Core != nil {
-			t.config.Runtime.Core.Core().V1().Node().OnChange(ctx, version.Program+"-tunnel-server", t.onChangeNode)
-			switch t.config.EgressSelectorMode {
-			case config.EgressSelectorModeCluster, config.EgressSelectorModePod:
-				t.config.Runtime.Core.Core().V1().Pod().OnChange(ctx, version.Program+"-tunnel-server", t.onChangePod)
-			}
-			return
-		}
-		logrus.Infof("Tunnel server egress proxy waiting for runtime core to become available")
-		time.Sleep(5 * time.Second)
+	t.config.Runtime.Core.Core().V1().Node().OnChange(ctx, version.Program+"-tunnel-server", t.onChangeNode)
+	switch t.config.EgressSelectorMode {
+	case config.EgressSelectorModeCluster, config.EgressSelectorModePod:
+		t.config.Runtime.Core.Core().V1().Pod().OnChange(ctx, version.Program+"-tunnel-server", t.onChangePod)
 	}
 }
@@ -173,7 +165,6 @@ func (t *TunnelServer) onChangePod(podName string, pod *v1.Pod) (*v1.Pod, error)
 		}
 	}
 	return pod, nil
 }
-
 // serveConnect attempts to handle the HTTP CONNECT request by dialing


@@ -168,8 +168,11 @@ func apiserverControllers(ctx context.Context, sc *Context, config *Config) {
 			panic(errors.Wrapf(err, "failed to start %s leader controller", util.GetFunctionName(controller)))
 		}
 	}
+
+	// Re-run context startup after core and leader-elected controllers have started. Additional
+	// informer caches may need to start for the newly added OnChange callbacks.
 	if err := sc.Start(ctx); err != nil {
-		panic(err)
+		panic(errors.Wrap(err, "failed to start wranger controllers"))
 	}
 }