E2E: Local cluster testing (#5977)

* Cleanup validatecluster printout
* remove deprecated kubectl exec format
* Implement CreateLocalCluster function
* Update testing documentation

Signed-off-by: Derek Nola <derek.nola@suse.com>
Derek Nola 2022-08-15 15:00:22 -07:00 committed by GitHub
parent 116c977fbf
commit 75f8cfb6ea
7 changed files with 114 additions and 37 deletions


@@ -20,6 +20,32 @@ An E2E test consists of two parts:
See the [validate cluster test](../tests/e2e/validatecluster/validatecluster_test.go) as an example.
## Setup
To run the E2E tests, you must first install the following:
- Vagrant
- Libvirt
- Vagrant plugins
### Vagrant
Download the latest version (currently 2.2.19) of Vagrant [*from the website*](https://www.vagrantup.com/downloads). Do not use packages built into your distribution; they are often outdated or missing the Ruby library extensions needed to get certain plugins working.
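For reference, on a Debian-based host the install can look roughly like the following; the exact URL and package name are assumptions, so verify them against the download page linked above:
```bash
# Rough sketch of installing the official Vagrant package on Debian/Ubuntu.
# Confirm the current version and URL on the Vagrant downloads page first.
wget https://releases.hashicorp.com/vagrant/2.2.19/vagrant_2.2.19_x86_64.deb
sudo dpkg -i vagrant_2.2.19_x86_64.deb
vagrant --version   # should report 2.2.19
```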
### Libvirt
Follow the OS-specific guides to install libvirt/qemu on your host (a rough Ubuntu example follows the list):
- [openSUSE](https://documentation.suse.com/sles/15-SP1/html/SLES-all/cha-vt-installation.html)
- [ubuntu](https://ubuntu.com/server/docs/virtualization-libvirt)
- [debian](https://wiki.debian.org/KVM#Installation)
- [fedora](https://developer.fedoraproject.org/tools/virtualization/installing-libvirt-and-virt-install-on-fedora-linux.html)
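On an Ubuntu host this typically amounts to something like the following; package names vary by release, so prefer the linked guide for your distribution:
```bash
# Rough Ubuntu example; adjust package names to your release.
sudo apt-get update
sudo apt-get install -y qemu-kvm libvirt-daemon-system libvirt-clients
# Let your user manage VMs without sudo (takes effect after logging out and back in).
sudo usermod -aG libvirt "$USER"
```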
### Vagrant plugins
Install the necessary vagrant plugins with the following command:
```bash
vagrant plugin install vagrant-libvirt vagrant-scp vagrant-k3s vagrant-reload
```
## Running
Generally, E2E tests are run as a nightly Jenkins job for QA. They can still be run locally, but additional setup may be required. By default, all E2E tests are designed with `libvirt` as the underlying VM provider. Instructions for installing libvirt and its associated vagrant plugin, `vagrant-libvirt`, can be found [here](https://github.com/vagrant-libvirt/vagrant-libvirt#installation). `VirtualBox` is also supported as a backup VM provider.
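As a hedged illustration, a local run looks roughly like the following; the Go test flags shown (such as `-nodeOS`, `-serverCount`, and `-agentCount`) are assumptions, so check the flag declarations at the top of the test file you are running:
```bash
# Minimal sketch of a local E2E run, assuming libvirt and the vagrant plugins above are installed.
cd tests/e2e/validatecluster
go test -v -timeout=60m ./... -nodeOS=generic/ubuntu2004 -serverCount=3 -agentCount=1
```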


@@ -36,6 +36,7 @@ def provision(vm, role, role_num, node_num)
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1 --docker]
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end


@@ -26,12 +26,13 @@ def provision(vm, roles, role_num, node_num)
:libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
:libvirt__ipv6_prefix => "64"
vagrant_defaults = '../vagrantdefaults.rb'
load vagrant_defaults if File.exists?(vagrant_defaults)
scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults
defaultOSConfigure(vm)
vm.provision "IPv6 Setup", type: "shell", path: "../scripts/ipv6.sh", args: [node_ip4, node_ip6, vm.box]
vm.provision "IPv6 Setup", type: "shell", path: scripts_location +"/ipv6.sh", args: [node_ip4, node_ip6, vm.box.to_s]
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"


@@ -5,11 +5,17 @@ os=$3
sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.eth1.accept_dad=0
sysctl -w net.ipv6.conf.eth1.accept_ra=0
sysctl -w net.ipv6.conf.eth1.forwarding=0
if [ -z "${os##*ubuntu*}" ]; then
netplan set ethernets.eth1.accept-ra=false
netplan set ethernets.eth1.addresses=["$ip4_addr"/24,"$ip6_addr"/64]
netplan apply
elif [ -z "${os##*alpine*}" ]; then
iplink set eth1 down
iplink set eth1 up
ip -6 addr add "$ip6_addr"/64 dev eth1
else
ip -6 addr add "$ip6_addr"/64 dev eth1
fi


@@ -38,7 +38,8 @@ func CountOfStringInSlice(str string, pods []Pod) int {
return count
}
func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
// genNodeEnvs generates the node and testing environment variables for vagrant up
func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]string, []string, string) {
serverNodeNames := make([]string, serverCount)
for i := 0; i < serverCount; i++ {
serverNodeNames[i] = "server-" + strconv.Itoa(i)
@@ -47,12 +48,22 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
for i := 0; i < agentCount; i++ {
agentNodeNames[i] = "agent-" + strconv.Itoa(i)
}
nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(agentNodeNames, " ")
nodeRoles := strings.Join(serverNodeNames, " ") + " " + strings.Join(agentNodeNames, " ")
nodeRoles = strings.TrimSpace(nodeRoles)
nodeBoxes := strings.Repeat(nodeOS+" ", serverCount+agentCount)
nodeBoxes = strings.TrimSpace(nodeBoxes)
nodeEnvs := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s"`, nodeRoles, nodeBoxes)
return serverNodeNames, agentNodeNames, nodeEnvs
}
func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
var testOptions string
for _, env := range os.Environ() {
if strings.HasPrefix(env, "E2E_") {
@@ -60,15 +71,54 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
}
}
cmd := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s" %s vagrant up &> vagrant.log`, nodeRoles, nodeBoxes, testOptions)
cmd := fmt.Sprintf(`%s %s vagrant up &> vagrant.log`, nodeEnvs, testOptions)
fmt.Println(cmd)
if _, err := RunCommand(cmd); err != nil {
fmt.Println("Error Creating Cluster", err)
return nil, nil, err
return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
}
return serverNodeNames, agentNodeNames, nil
}
// CreateLocalCluster creates a cluster using the locally built k3s binary. The vagrant-scp plugin must be installed for
// this function to work. The binary is deployed as an airgapped install of k3s on the VMs.
// This is intended only for local testing purposes when writing a new E2E test.
func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
var testOptions string
for _, env := range os.Environ() {
if strings.HasPrefix(env, "E2E_") {
testOptions += " " + env
}
}
testOptions += " E2E_RELEASE_VERSION=skip"
cmd := fmt.Sprintf(`%s vagrant up --no-provision &> vagrant.log`, nodeEnvs)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed creating nodes: %s: %v", cmd, err)
}
nodeRoles := append(serverNodeNames, agentNodeNames...)
for _, node := range nodeRoles {
cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
}
if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil {
return nil, nil, err
}
}
cmd = fmt.Sprintf(`%s %s vagrant provision &>> vagrant.log`, nodeEnvs, testOptions)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
}
return serverNodeNames, agentNodeNames, nil
}
func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) {
resourceDir := "../amd64_resource_files"
if hardened {
@@ -240,7 +290,11 @@ func RestartCluster(nodeNames []string) error {
// RunCmdOnNode executes a command from within the given node
func RunCmdOnNode(cmd string, nodename string) (string, error) {
runcmd := "vagrant ssh -c \"" + cmd + "\" " + nodename
return RunCommand(runcmd)
out, err := RunCommand(runcmd)
if err != nil {
return out, fmt.Errorf("failed to run command %s on node %s: %v", cmd, nodename, err)
}
return out, nil
}
// RunCommand executes a command on the host
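Stepping back from the diff: the following is a rough sketch of the local workflow that the new `CreateLocalCluster` function above is meant to support. The `make` step and the assumption that the test suite is wired to call `CreateLocalCluster` are illustrative; the binary path matches the `vagrant scp` source used in the function:
```bash
# Build the k3s binary at the repo root so it lands in dist/artifacts/k3s (assumed make behavior),
# make sure the vagrant-scp plugin is installed, then run a suite wired to call e2e.CreateLocalCluster.
make
vagrant plugin install vagrant-scp
cd tests/e2e/validatecluster
go test -v -timeout=60m ./...
```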


@@ -234,7 +234,7 @@ var _ = Describe("Verify Upgrade", func() {
// Check data after re-creation
Eventually(func() (string, error) {
cmd = "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
return e2e.RunCommand(cmd)
}, "180s", "2s").Should(ContainSubstring("local-path-test"))
})
@@ -364,7 +364,7 @@ var _ = Describe("Verify Upgrade", func() {
It("After upgrade verify Local Path Provisioner storage ", func() {
Eventually(func() (string, error) {
cmd := "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
return e2e.RunCommand(cmd)
}, "180s", "2s").Should(ContainSubstring("local-path-test"))
})


@@ -92,7 +92,6 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
res, err := e2e.RunCmdOnNode(cmd, nodeName)
g.Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
Expect(res).Should(ContainSubstring("test-clusterip"))
}, "120s", "10s").Should(Succeed())
}
@@ -119,8 +118,7 @@ var _ = Describe("Verify Create", func() {
fmt.Println(cmd)
Eventually(func(g Gomega) {
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("test-nodeport"))
}, "240s", "5s").Should(Succeed())
}
@@ -140,16 +138,14 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
Eventually(func(g Gomega) {
cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
fmt.Println(cmd)
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
}
@@ -166,8 +162,7 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("test-ingress"))
}, "240s", "5s").Should(Succeed())
}
@@ -196,16 +191,15 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
res, _ := e2e.RunCommand(cmd)
fmt.Println(res)
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("dnsutils"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
fmt.Println(cmd)
res, _ := e2e.RunCommand(cmd)
fmt.Println(res)
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
}, "420s", "2s").Should(Succeed())
})
@@ -217,8 +211,7 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("local-path-pvc"))
g.Expect(res).Should(ContainSubstring("Bound"))
}, "420s", "2s").Should(Succeed())
@@ -226,8 +219,7 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
@@ -235,12 +227,10 @@ var _ = Describe("Verify Create", func() {
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
_, err = e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println("Data stored in pvc: local-path-test")
cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
@@ -248,23 +238,22 @@ var _ = Describe("Verify Create", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
res, _ := e2e.RunCommand(cmd)
fmt.Println(res)
g.Expect(res).Should(ContainSubstring("local-path-provisioner"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
fmt.Println(res)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd = "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
cmd := "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
res, err = e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
fmt.Println("Data after re-creation", res)
g.Expect(res).Should(ContainSubstring("local-path-test"))
}, "180s", "2s").Should(Succeed())