Move ClusterResetRestore handling after ControlConfig setup

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Brad Davidson 2022-01-11 16:59:41 -08:00 committed by Brad Davidson
parent a3190bd146
commit a0cadcd343

@@ -165,43 +165,6 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 		return errors.New("invalid flag use; --cluster-reset required with --cluster-reset-restore-path")
 	}
-	// make sure components are disabled so we only perform a restore
-	// and bail out
-	if cfg.ClusterResetRestorePath != "" && cfg.ClusterReset {
-		serverConfig.ControlConfig.ClusterInit = true
-		serverConfig.ControlConfig.DisableAPIServer = true
-		serverConfig.ControlConfig.DisableControllerManager = true
-		serverConfig.ControlConfig.DisableScheduler = true
-		serverConfig.ControlConfig.DisableCCM = true
-		// only close the agentReady channel in case of k3s restoration, because k3s does not start
-		// the agent until server returns successfully, unlike rke2's agent which starts in parallel
-		// with the server
-		if serverConfig.ControlConfig.SupervisorPort == serverConfig.ControlConfig.HTTPSPort {
-			close(agentReady)
-		}
-		dataDir, err := datadir.LocalHome(cfg.DataDir, false)
-		if err != nil {
-			return err
-		}
-		// delete local loadbalancers state for apiserver and supervisor servers
-		loadbalancer.ResetLoadBalancer(filepath.Join(dataDir, "agent"), loadbalancer.SupervisorServiceName)
-		loadbalancer.ResetLoadBalancer(filepath.Join(dataDir, "agent"), loadbalancer.APIServerServiceName)
-		// at this point we're doing a restore. Check to see if we've
-		// passed in a token and if not, check if the token file exists.
-		// If it doesn't, return an error indicating the token is necessary.
-		if cfg.Token == "" {
-			tokenFile := filepath.Join(dataDir, "server", "token")
-			if _, err := os.Stat(tokenFile); err != nil {
-				if os.IsNotExist(err) {
-					return errors.New(tokenFile + " does not exist, please pass --token to complete the restoration")
-				}
-			}
-		}
-	}
 	serverConfig.ControlConfig.ClusterReset = cfg.ClusterReset
 	serverConfig.ControlConfig.ClusterResetRestorePath = cfg.ClusterResetRestorePath
 	serverConfig.ControlConfig.SystemDefaultRegistry = cfg.SystemDefaultRegistry
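
The close(agentReady) call in the moved block relies on Go's channel-close broadcast: closing a channel unblocks every receiver at once, which is how the restore-only path signals waiters even though the server never completes a normal startup. The port-equality guard apparently serves as the k3s-vs-rke2 discriminator the comment describes, since k3s serves the supervisor on the HTTPS port while rke2 uses a separate supervisor port. A minimal self-contained sketch of the broadcast pattern (the channel name is borrowed from the diff; everything else is illustrative):

package main

import "fmt"

func main() {
	// agentReady mirrors the channel in the diff: it is never sent on,
	// only closed, to broadcast "go ahead" to any number of waiters.
	agentReady := make(chan struct{})

	done := make(chan struct{})
	go func() {
		<-agentReady // a receive on a closed channel returns immediately
		fmt.Println("agent: unblocked, proceeding")
		close(done)
	}()

	// The restore-only path bails out before the normal startup code
	// would ever close agentReady, so it closes the channel up front.
	close(agentReady)
	<-done
}
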
@@ -417,6 +380,43 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 		return errors.Wrap(err, "invalid tls-cipher-suites")
 	}
+	// make sure components are disabled so we only perform a restore
+	// and bail out
+	if cfg.ClusterResetRestorePath != "" && cfg.ClusterReset {
+		serverConfig.ControlConfig.ClusterInit = true
+		serverConfig.ControlConfig.DisableAPIServer = true
+		serverConfig.ControlConfig.DisableControllerManager = true
+		serverConfig.ControlConfig.DisableScheduler = true
+		serverConfig.ControlConfig.DisableCCM = true
+		// only close the agentReady channel in case of k3s restoration, because k3s does not start
+		// the agent until server returns successfully, unlike rke2's agent which starts in parallel
+		// with the server
+		if serverConfig.ControlConfig.SupervisorPort == serverConfig.ControlConfig.HTTPSPort {
+			close(agentReady)
+		}
+		dataDir, err := datadir.LocalHome(cfg.DataDir, false)
+		if err != nil {
+			return err
+		}
+		// delete local loadbalancers state for apiserver and supervisor servers
+		loadbalancer.ResetLoadBalancer(filepath.Join(dataDir, "agent"), loadbalancer.SupervisorServiceName)
+		loadbalancer.ResetLoadBalancer(filepath.Join(dataDir, "agent"), loadbalancer.APIServerServiceName)
+		// at this point we're doing a restore. Check to see if we've
+		// passed in a token and if not, check if the token file exists.
+		// If it doesn't, return an error indicating the token is necessary.
+		if cfg.Token == "" {
+			tokenFile := filepath.Join(dataDir, "server", "token")
+			if _, err := os.Stat(tokenFile); err != nil {
+				if os.IsNotExist(err) {
+					return errors.New(tokenFile + " does not exist, please pass --token to complete the restoration")
+				}
+			}
+		}
+	}
 	logrus.Info("Starting " + version.Program + " " + app.App.Version)
 	notifySocket := os.Getenv("NOTIFY_SOCKET")
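
The token fallback at the end of the moved block is self-contained enough to restate as a sketch: an explicit --token always passes; otherwise the server token file written by the prior installation must still be on disk. checkRestoreToken below is a hypothetical helper, not k3s API; the server/token path relative to the data dir comes from the diff itself:

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// checkRestoreToken restates the diff's logic. As in the diff, stat
// errors other than "does not exist" are deliberately ignored.
func checkRestoreToken(dataDir, token string) error {
	if token != "" {
		return nil // caller supplied a token explicitly
	}
	tokenFile := filepath.Join(dataDir, "server", "token")
	if _, err := os.Stat(tokenFile); err != nil && os.IsNotExist(err) {
		return errors.New(tokenFile + " does not exist, please pass --token to complete the restoration")
	}
	return nil
}

func main() {
	// A data dir with no token file and no explicit token fails,
	// just as the restore path in the diff would.
	if err := checkRestoreToken("/nonexistent-data-dir", ""); err != nil {
		fmt.Println("restore blocked:", err)
	}
}
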