Add e2e tests for CA cert rotation

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Brad Davidson 2023-01-27 20:40:29 +00:00 committed by Brad Davidson
parent 8a6404f97c
commit be7f751863
3 changed files with 227 additions and 1 deletion

tests/e2e/rotateca/Vagrantfile

@@ -0,0 +1,84 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0", "server-1", "server-2", "agent-1"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# VirtualBox >= 6.1.28 requires `/etc/vbox/networks.conf` for expanded private networks
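# e.g. to allow the 10.10.10.0/24 range used below, networks.conf needs a line such as:
#   * 10.0.0.0/8
# (see the VirtualBox host-only network documentation)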
NETWORK_PREFIX = "10.10.10"
install_type = ""
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = role
# An expanded netmask is required to allow VM<-->VM communication; VirtualBox defaults to /32
vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"
vagrant_defaults = '../vagrantdefaults.rb'
load vagrant_defaults if File.exists?(vagrant_defaults)
defaultOSConfigure(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
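  # Sanity-check DNS resolution and outbound connectivity before installing k3s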
vm.provision "shell", inline: "ping -c 2 k3s.io"
if role.include?("server") && role_num == 0
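    # Generate a custom CA with contrib/util/certs.sh before installing k3s on the first
    # server, so the cluster bootstraps with user-provided certificates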
vm.provision 'custom-ca', type: 'shell', run: 'once' do |script|
script.path = '../../../contrib/util/certs.sh'
script.env = {'PRODUCT' => 'vagrant-e2e-test', 'DATA_DIR' => '/var/lib/rancher/k3s'}
end
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %W[server --cluster-init --node-external-ip=#{NETWORK_PREFIX}.100 --flannel-iface=eth1]
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
elsif role.include?("server") && role_num != 0
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %W[server --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
elsif role.include?("agent")
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
end
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'
end
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
# Default provider is libvirt; virtualbox is only provided as a backup
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
config.vm.provider "virtualbox" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end
# Must iterate on the index; Vagrant does not understand iterating
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
provision(node.vm, name, role_num, i)
end
end
end


@@ -0,0 +1,142 @@
package rotateca
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS: generic/ubuntu2204, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2204", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
func Test_E2ECustomCARotation(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "Custom CA Rotation Test Suite", suiteConfig, reporterConfig)
}
var (
kubeConfigFile string
agentNodeNames []string
serverNodeNames []string
)
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify Custom CA Rotation", Ordered, func() {
Context("Custom CA is rotated:", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})
It("Generates New CA Certificates", func() {
cmds := []string{
"sudo mkdir -p /opt/rancher/k3s/server",
"sudo cp -r /var/lib/rancher/k3s/server/tls /opt/rancher/k3s/server",
"curl -ksL https://raw.githubusercontent.com/brandond/k3s/custom-cert-gen/contrib/util/certs.sh | sudo DATA_DIR=/opt/rancher/k3s bash -s -",
}
for _, cmd := range cmds {
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
}
})
It("Rotates CA Certificates", func() {
cmd := "sudo k3s certificate rotate-ca --path=/opt/rancher/k3s"
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
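		// Restart servers first, then agents, so every node reconnects with certificates
		// issued by the rotated CA.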
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
})
It("Restarts K3s agents", func() {
Expect(e2e.RestartCluster(agentNodeNames)).To(Succeed())
})
It("Checks node and pod status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})


@@ -40,7 +40,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
	It("Starts up with no issues", func() {
		var err error
		serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
-		Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog)
+		Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
		fmt.Println("CLUSTER CONFIG")
		fmt.Println("OS:", *nodeOS)
		fmt.Println("Server Nodes:", serverNodeNames)