Have Bootstrap Data Stored in etcd at Completed Start (#3038)

* have state stored in etcd at completed start and remove unneeded code
Brian Downs 2021-03-11 13:07:40 -07:00 committed by GitHub
parent 69f96d6225
commit 7c99f8645d
6 changed files with 42 additions and 67 deletions

View File

@@ -26,7 +26,6 @@ func (c *Cluster) Bootstrap(ctx context.Context) error {
if err != nil {
return err
}
c.shouldBootstrap = shouldBootstrap
if shouldBootstrap {

View File

@@ -5,7 +5,6 @@ import (
"net/url"
"strings"
"github.com/k3s-io/kine/pkg/client"
"github.com/k3s-io/kine/pkg/endpoint"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/clientaccess"
@@ -25,12 +24,12 @@ type Cluster struct {
etcdConfig endpoint.ETCDConfig
joining bool
saveBootstrap bool
storageClient client.Client
}
// Start creates the dynamic tls listener, http request handler,
// handles starting and writing/reading bootstrap data, and returns a channel
// that will be closed when datastore is ready.
// that will be closed when datastore is ready. If embedded etcd is in use,
// a secondary call to Cluster.save is made.
func (c *Cluster) Start(ctx context.Context) (<-chan struct{}, error) {
// Set up the dynamiclistener and http request handlers
if err := c.initClusterAndHTTPS(ctx); err != nil {
@@ -93,7 +92,20 @@ func (c *Cluster) Start(ctx context.Context) (<-chan struct{}, error) {
}
}
return ready, c.startStorage(ctx)
if err := c.startStorage(ctx); err != nil {
return nil, err
}
// at this point, if etcd is in use, it's up, ready,
// and bootstrapping is complete so save the bootstrap
// data
if c.managedDB != nil {
if err := c.save(ctx); err != nil {
return nil, err
}
}
return ready, nil
}
// startStorage starts the kine listener and configures the endpoints, if necessary.
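For context, a minimal sketch of how a caller might consume the contract described above; the helper name waitForDatastore and its error handling are hypothetical, not part of this commit (assumes the context package and the cluster package's Cluster type):

// waitForDatastore is a hypothetical helper illustrating the Start
// contract: the returned channel closes once the datastore is ready,
// and for managed etcd the bootstrap data has been saved by then.
func waitForDatastore(ctx context.Context, c *Cluster) error {
	ready, err := c.Start(ctx)
	if err != nil {
		return err
	}
	select {
	case <-ready:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}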

View File

@@ -9,7 +9,6 @@ import (
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
@@ -50,53 +49,6 @@ func (c *Cluster) testClusterDB(ctx context.Context) (<-chan struct{}, error) {
return result, nil
}
// cleanCerts removes existing certificates previously
// generated for use by the cluster.
func (c *Cluster) cleanCerts() {
certs := []string{filepath.Join(c.config.DataDir, "tls", "client-ca.crt"),
filepath.Join(c.config.DataDir, "tls", "client-ca.key"),
filepath.Join(c.config.DataDir, "tls", "server-ca.crt"),
filepath.Join(c.config.DataDir, "tls", "server-ca.key"),
filepath.Join(c.config.DataDir, "tls", "request-header-ca.crt"),
filepath.Join(c.config.DataDir, "tls", "request-header-ca.key"),
filepath.Join(c.config.DataDir, "tls", "service.key"),
filepath.Join(c.config.DataDir, "tls", "client-admin.crt"),
filepath.Join(c.config.DataDir, "tls", "client-admin.key"),
filepath.Join(c.config.DataDir, "tls", "client-controller.crt"),
filepath.Join(c.config.DataDir, "tls", "client-controller.key"),
filepath.Join(c.config.DataDir, "tls", "client-cloud-controller.crt"),
filepath.Join(c.config.DataDir, "tls", "client-cloud-controller.key"),
filepath.Join(c.config.DataDir, "tls", "client-scheduler.crt"),
filepath.Join(c.config.DataDir, "tls", "client-scheduler.key"),
filepath.Join(c.config.DataDir, "tls", "client-kube-apiserver.crt"),
filepath.Join(c.config.DataDir, "tls", "client-kube-apiserver.key"),
filepath.Join(c.config.DataDir, "tls", "client-kube-proxy.crt"),
filepath.Join(c.config.DataDir, "tls", "client-kube-proxy.key"),
filepath.Join(c.config.DataDir, "tls", "client-"+version.Program+"-controller.crt"),
filepath.Join(c.config.DataDir, "tls", "client-"+version.Program+"-controller.key"),
filepath.Join(c.config.DataDir, "tls", "serving-kube-apiserver.crt"),
filepath.Join(c.config.DataDir, "tls", "serving-kube-apiserver.key"),
filepath.Join(c.config.DataDir, "tls", "client-kubelet.key"),
filepath.Join(c.config.DataDir, "tls", "serving-kubelet.key"),
filepath.Join(c.config.DataDir, "tls", "client-auth-proxy.key"),
filepath.Join(c.config.DataDir, "tls", "etcd", "server-ca.crt"),
filepath.Join(c.config.DataDir, "tls", "etcd", "server-ca.key"),
filepath.Join(c.config.DataDir, "tls", "etcd", "peer-ca.crt"),
filepath.Join(c.config.DataDir, "tls", "etcd", "peer-ca.key"),
filepath.Join(c.config.DataDir, "tls", "etcd", "server-client.crt"),
filepath.Join(c.config.DataDir, "tls", "etcd", "server-client.key"),
filepath.Join(c.config.DataDir, "tls", "etcd", "peer-server-client.crt"),
filepath.Join(c.config.DataDir, "tls", "etcd", "peer-server-client.key"),
filepath.Join(c.config.DataDir, "tls", "etcd", "client.crt"),
filepath.Join(c.config.DataDir, "tls", "etcd", "client.key"),
}
for _, cert := range certs {
os.Remove(cert)
}
}
// start starts the database, unless a cluster reset has been requested, in which case
// it does that instead.
func (c *Cluster) start(ctx context.Context) error {
@@ -105,7 +57,15 @@ func (c *Cluster) start(ctx context.Context) error {
return nil
}
if c.config.ClusterReset {
switch {
case c.config.ClusterReset && c.config.ClusterResetRestorePath != "":
rebootstrap := func() error {
return c.storageBootstrap(ctx)
}
if err := c.managedDB.Reset(ctx, rebootstrap); err != nil {
return err
}
case c.config.ClusterReset:
if _, err := os.Stat(resetFile); err != nil {
if !os.IsNotExist(err) {
return err
@@ -113,14 +73,8 @@ func (c *Cluster) start(ctx context.Context) error {
} else {
return fmt.Errorf("cluster-reset was successfully performed, please remove the cluster-reset flag and start %s normally, if you need to perform another cluster reset, you must first manually delete the %s file", version.Program, resetFile)
}
}
rebootstrap := func() error {
return c.storageBootstrap(ctx)
}
if err := c.managedDB.Reset(ctx, rebootstrap, c.cleanCerts); err != nil {
return err
}
}
// remove the reset file, ignoring any error if the file doesn't exist
os.Remove(resetFile)
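The hunk above splits reset handling into restore-from-snapshot and plain reset. For the plain-reset branch, a condensed, behaviorally equivalent form of the reset-file guard (error text abbreviated here for illustration):

// Refuse a second --cluster-reset while the marker file left by a
// successful reset is still present; any other stat error is fatal.
if _, err := os.Stat(resetFile); err == nil {
	return fmt.Errorf("remove %s before performing another cluster reset", resetFile)
} else if !os.IsNotExist(err) {
	return err
}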

View File

@@ -16,7 +16,7 @@ var (
type Driver interface {
IsInitialized(ctx context.Context, config *config.Control) (bool, error)
Register(ctx context.Context, config *config.Control, handler http.Handler) (http.Handler, error)
Reset(ctx context.Context, rebootstrap func() error, cleanCerts func()) error
Reset(ctx context.Context, rebootstrap func() error) error
Start(ctx context.Context, clientAccessInfo *clientaccess.Info) error
Test(ctx context.Context) error
Restore(ctx context.Context) error
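To illustrate the narrowed signature, a hypothetical driver stub (only Reset shown; a real driver such as the embedded etcd implementation must satisfy the full interface):

// mockDriver exists only to show the new Reset shape: certificate
// cleanup is no longer a driver concern, so Reset receives just the
// rebootstrap callback to re-read bootstrap data after a restore.
type mockDriver struct{}

func (m *mockDriver) Reset(ctx context.Context, rebootstrap func() error) error {
	return rebootstrap()
}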

View File

@@ -6,6 +6,7 @@ import (
"github.com/k3s-io/kine/pkg/client"
"github.com/rancher/k3s/pkg/bootstrap"
"github.com/sirupsen/logrus"
)
// save writes the current ControlRuntimeBootstrap data to the datastore. This contains a complete
@@ -22,8 +23,20 @@ func (c *Cluster) save(ctx context.Context) error {
if err != nil {
return err
}
storageClient, err := client.New(c.etcdConfig)
if err != nil {
return err
}
return c.storageClient.Create(ctx, storageKey(c.config.Token), data)
if err := storageClient.Create(ctx, storageKey(c.config.Token), data); err != nil {
if err.Error() == "key exists" {
logrus.Warnln("Bootstrap key already exists; please follow the documentation on updating a node after a restore.")
return nil
}
return err
}
return nil
}
// storageBootstrap loads data from the datastore into the ControlRuntimeBootstrap struct.
@@ -37,7 +50,6 @@ func (c *Cluster) storageBootstrap(ctx context.Context) error {
if err != nil {
return err
}
c.storageClient = storageClient
value, err := storageClient.Get(ctx, storageKey(c.config.Token))
if err == client.ErrNotFound {
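A self-contained sketch of the create-once pattern save now relies on, using kine's client and endpoint packages; the helper name ensureBootstrapKey is hypothetical, and, mirroring the diff, the client is not explicitly closed:

// ensureBootstrapKey writes value under key at most once: kine's
// Create fails with "key exists" for a key that is already present,
// which is treated here as a benign, already-bootstrapped state.
func ensureBootstrapKey(ctx context.Context, cfg endpoint.ETCDConfig, key string, value []byte) error {
	storageClient, err := client.New(cfg)
	if err != nil {
		return err
	}
	if err := storageClient.Create(ctx, key, value); err != nil {
		if err.Error() == "key exists" {
			return nil // another server already stored the bootstrap data
		}
		return err
	}
	return nil
}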

View File

@@ -161,7 +161,7 @@ func (e *ETCD) IsInitialized(ctx context.Context, config *config.Control) (bool, error) {
}
// Reset resets an etcd node
func (e *ETCD) Reset(ctx context.Context, rebootstrap func() error, cleanCerts func()) error {
func (e *ETCD) Reset(ctx context.Context, rebootstrap func() error) error {
// Wait for etcd to come up as a new single-node cluster, then exit
go func() {
t := time.NewTicker(5 * time.Second)
@@ -178,8 +178,6 @@ func (e *ETCD) Reset(ctx context.Context, rebootstrap func() error, cleanCerts func()) error {
logrus.Fatal(err)
}
cleanCerts()
// regenerate the server deps to rewrite the certificates, mirroring prepare() in daemons/control/server.go
if err := deps.GenServerDeps(e.config, e.runtime); err != nil {
logrus.Fatal(err)
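
The goroutine begun above polls until the reset completes and then exits the process. A schematic of that pattern, with the membership/health check stubbed out as a hypothetical clusterIsReset callback (assumes the time, os, and logrus packages):

// exitWhenReset sketches the poll-and-exit loop: tick every five
// seconds and terminate once the cluster reports a reset state.
func exitWhenReset(ctx context.Context, clusterIsReset func(context.Context) bool) {
	t := time.NewTicker(5 * time.Second)
	defer t.Stop()
	for range t.C {
		if clusterIsReset(ctx) {
			logrus.Info("managed etcd cluster membership has been reset")
			os.Exit(0)
		}
	}
}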