mirror of
https://github.com/k3s-io/k3s.git
synced 2024-06-07 19:41:36 +00:00
Add k3s etcd restoration integration test (#5014)
* Add k3s etcd restoration test Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fix tests and rebase Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Reorganizing the tests Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fixing comments Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fix etcd restore Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * dont check for errors when restoring Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * use eventually to test for restoration Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fix tests Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fix golint Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
This commit is contained in:
parent
0a3bdc6dae
commit
13728058a4
121
pkg/etcd/int_tests/restore/etcd_restore_int_test.go
Normal file
121
pkg/etcd/int_tests/restore/etcd_restore_int_test.go
Normal file
@@ -0,0 +1,121 @@
|
||||
package restore_test
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
"github.com/onsi/ginkgo/reporters"
|
||||
. "github.com/onsi/gomega"
|
||||
testutil "github.com/rancher/k3s/tests/util"
|
||||
)
|
||||
|
||||
var server1, server2 *testutil.K3sServer
|
||||
var tmpdDataDir = "/tmp/restoredatadir"
|
||||
var clientCACertHash string
|
||||
var restoreServerArgs = []string{"--cluster-init", "-t", "test", "-d", tmpdDataDir}
|
||||
var _ = BeforeSuite(func() {
|
||||
if !testutil.IsExistingServer() {
|
||||
var err error
|
||||
server1, err = testutil.K3sStartServer(restoreServerArgs...)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
})
|
||||
|
||||
var _ = Describe("etcd snapshot restore", func() {
|
||||
BeforeEach(func() {
|
||||
if testutil.IsExistingServer() && !testutil.ServerArgsPresent(restoreServerArgs) {
|
||||
Skip("Test needs k3s server with: " + strings.Join(restoreServerArgs, " "))
|
||||
}
|
||||
})
|
||||
When("a snapshot is restored on existing node", func() {
|
||||
It("etcd starts up with no problems", func() {
|
||||
Eventually(func() (string, error) {
|
||||
return testutil.K3sCmd("kubectl", "get", "pods", "-A")
|
||||
}, "360s", "5s").Should(MatchRegexp("kube-system.+coredns.+1\\/1.+Running"))
|
||||
})
|
||||
It("create a workload", func() {
|
||||
result, err := testutil.K3sCmd("kubectl", "create", "-f", "./testdata/temp_depl.yaml")
|
||||
Expect(result).To(ContainSubstring("deployment.apps/nginx-deployment created"))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
It("saves an etcd snapshot", func() {
|
||||
Expect(testutil.K3sCmd("etcd-snapshot", "save", "-d", tmpdDataDir, "--name", "snapshot-to-restore")).
|
||||
To(ContainSubstring("saved"))
|
||||
})
|
||||
It("list snapshots", func() {
|
||||
Expect(testutil.K3sCmd("etcd-snapshot", "ls", "-d", tmpdDataDir)).
|
||||
To(MatchRegexp(`://` + tmpdDataDir + `/server/db/snapshots/snapshot-to-restore`))
|
||||
})
|
||||
// create another workload
|
||||
It("create a workload 2", func() {
|
||||
result, err := testutil.K3sCmd("kubectl", "create", "-f", "./testdata/temp_depl2.yaml")
|
||||
Expect(result).To(ContainSubstring("deployment.apps/nginx-deployment-post-snapshot created"))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
It("get Client CA cert hash", func() {
|
||||
// get md5sum of the CA certs
|
||||
var err error
|
||||
clientCACertHash, err = testutil.RunCommand("md5sum " + tmpdDataDir + "/server/tls/client-ca.crt | cut -f 1 -d' '")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
It("stop k3s", func() {
|
||||
Expect(testutil.K3sKillServer(server1, true)).To(Succeed())
|
||||
})
|
||||
It("restore the snapshot", func() {
|
||||
// get snapshot file
|
||||
filePath, err := testutil.RunCommand(`sudo find ` + tmpdDataDir + `/server -name "*snapshot-to-restore*"`)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
filePath = strings.TrimSuffix(filePath, "\n")
|
||||
Eventually(func() (string, error) {
|
||||
return testutil.K3sCmd("server", "-d", tmpdDataDir, "--cluster-reset", "--token", "test", "--cluster-reset-restore-path", filePath)
|
||||
}, "360s", "5s").Should(ContainSubstring(`Etcd is running, restart without --cluster-reset flag now`))
|
||||
})
|
||||
It("start k3s server", func() {
|
||||
var err error
|
||||
server2, err = testutil.K3sStartServer(restoreServerArgs...)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
It("starts up with no problems", func() {
|
||||
Eventually(func() (string, error) {
|
||||
return testutil.K3sCmd("kubectl", "get", "pods", "-A")
|
||||
}, "360s", "5s").Should(MatchRegexp("kube-system.+coredns.+1\\/1.+Running"))
|
||||
})
|
||||
It("Make sure Workload 1 exists", func() {
|
||||
Eventually(func() (string, error) {
|
||||
return testutil.K3sCmd("kubectl", "get", "deployment", "nginx-deployment")
|
||||
}, "360s", "5s").Should(ContainSubstring("3/3"))
|
||||
})
|
||||
It("Make sure Workload 2 does not exists", func() {
|
||||
res, err := testutil.K3sCmd("kubectl", "get", "deployment", "nginx-deployment-post-snapshot")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(res).To(ContainSubstring("not found"))
|
||||
})
|
||||
It("check if CA cert hash matches", func() {
|
||||
// get md5sum of the CA certs
|
||||
var err error
|
||||
clientCACertHash2, err := testutil.RunCommand("md5sum " + tmpdDataDir + "/server/tls/client-ca.crt | cut -f 1 -d' '")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(clientCACertHash2).To(Equal(clientCACertHash))
|
||||
})
|
||||
It("stop k3s", func() {
|
||||
Expect(testutil.K3sKillServer(server2, false)).To(Succeed())
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
if !testutil.IsExistingServer() {
|
||||
Expect(testutil.K3sKillServer(server1, false)).To(Succeed())
|
||||
Expect(testutil.K3sCleanup(server1, true, tmpdDataDir)).To(Succeed())
|
||||
Expect(testutil.K3sKillServer(server2, false)).To(Succeed())
|
||||
Expect(testutil.K3sCleanup(server2, true, tmpdDataDir)).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
||||
func Test_RestoreSnapshotIntegrationEtcd(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecsWithDefaultAndCustomReporters(t, "Etcd Suite", []Reporter{
|
||||
reporters.NewJUnitReporter("/tmp/results/junit-etcd-restore.xml"),
|
||||
})
|
||||
}
|
21
pkg/etcd/int_tests/restore/testdata/temp_depl.yaml
vendored
Normal file
21
pkg/etcd/int_tests/restore/testdata/temp_depl.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# Pre-snapshot workload for the etcd restore integration test: created
# before the snapshot is taken, so it must still exist (3/3 replicas)
# after the snapshot is restored.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
21
pkg/etcd/int_tests/restore/testdata/temp_depl2.yaml
vendored
Normal file
21
pkg/etcd/int_tests/restore/testdata/temp_depl2.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# Post-snapshot workload for the etcd restore integration test: created
# after the snapshot is taken, so it must be gone ("not found") once the
# snapshot is restored.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-post-snapshot
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80
|
@ -1,4 +1,4 @@
|
||||
package etcd_test
|
||||
package snapshot_test
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
@ -113,7 +113,7 @@ var _ = Describe("etcd snapshots", func() {
|
||||
var _ = AfterSuite(func() {
|
||||
if !testutil.IsExistingServer() {
|
||||
Expect(testutil.K3sKillServer(server, false)).To(Succeed())
|
||||
Expect(testutil.K3sCleanup(server, true)).To(Succeed())
|
||||
Expect(testutil.K3sCleanup(server, true, "")).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
@ -53,7 +53,7 @@ var _ = Describe("dual stack", func() {
|
||||
var _ = AfterSuite(func() {
|
||||
if !testutil.IsExistingServer() && os.Getenv("CI") != "true" {
|
||||
Expect(testutil.K3sKillServer(dualStackServer, false)).To(Succeed())
|
||||
Expect(testutil.K3sCleanup(dualStackServer, true)).To(Succeed())
|
||||
Expect(testutil.K3sCleanup(dualStackServer, true, "")).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
||||
|
@ -82,7 +82,7 @@ var _ = Describe("local storage", func() {
|
||||
var _ = AfterSuite(func() {
|
||||
if !testutil.IsExistingServer() {
|
||||
Expect(testutil.K3sKillServer(localStorageServer, false)).To(Succeed())
|
||||
Expect(testutil.K3sCleanup(localStorageServer, true)).To(Succeed())
|
||||
Expect(testutil.K3sCleanup(localStorageServer, true, "")).To(Succeed())
|
||||
}
|
||||
})
|
||||
|
||||
|
@ -2,8 +2,10 @@ package util
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
@ -18,7 +20,7 @@ import (
|
||||
// Compile-time variable
|
||||
var existingServer = "False"
|
||||
|
||||
const lockFile = "/var/lock/k3s-test.lock"
|
||||
const lockFile = "/tmp/k3s-test.lock"
|
||||
|
||||
type K3sServer struct {
|
||||
cmd *exec.Cmd
|
||||
@ -146,9 +148,7 @@ func K3sStartServer(inputArgs ...string) (*K3sServer, error) {
|
||||
for _, arg := range inputArgs {
|
||||
cmdArgs = append(cmdArgs, strings.Fields(arg)...)
|
||||
}
|
||||
|
||||
k3sBin := findK3sExecutable()
|
||||
|
||||
k3sCmd := append([]string{"server"}, cmdArgs...)
|
||||
cmd := exec.Command(k3sBin, k3sCmd...)
|
||||
// Give the server a new group id so we can kill it and its children later
|
||||
@ -176,7 +176,7 @@ func K3sKillServer(server *K3sServer, releaseLock bool) error {
|
||||
}
|
||||
|
||||
// K3sCleanup attempts to cleanup networking and files leftover from an integration test
|
||||
func K3sCleanup(server *K3sServer, releaseLock bool) error {
|
||||
func K3sCleanup(server *K3sServer, releaseLock bool, dataDir string) error {
|
||||
if cni0Link, err := netlink.LinkByName("cni0"); err == nil {
|
||||
links, _ := netlink.LinkList()
|
||||
for _, link := range links {
|
||||
@ -193,7 +193,10 @@ func K3sCleanup(server *K3sServer, releaseLock bool) error {
|
||||
if flannelV6, err := netlink.LinkByName("flannel-v6.1"); err == nil {
|
||||
netlink.LinkDel(flannelV6)
|
||||
}
|
||||
if err := os.RemoveAll("/var/lib/rancher/k3s"); err != nil {
|
||||
if dataDir == "" {
|
||||
dataDir = "/var/lib/rancher/k3s"
|
||||
}
|
||||
if err := os.RemoveAll(dataDir); err != nil {
|
||||
return err
|
||||
}
|
||||
if releaseLock {
|
||||
@ -201,3 +204,15 @@ func K3sCleanup(server *K3sServer, releaseLock bool) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunCommand executes cmd through "bash -c" and returns its standard output.
// Stderr is not captured. A non-zero exit status (or failure to start bash)
// is returned as an error wrapping the underlying exec error, annotated with
// the command that failed.
func RunCommand(cmd string) (string, error) {
	c := exec.Command("bash", "-c", cmd)
	var out bytes.Buffer
	c.Stdout = &out
	// Wrap with %w so callers can still unwrap *exec.ExitError; the original
	// used %s, which discarded the error chain.
	if err := c.Run(); err != nil {
		return "", fmt.Errorf("running command %q: %w", cmd, err)
	}
	return out.String(), nil
}
|
||||
|
Loading…
Reference in New Issue
Block a user