E2E: Support GOCOVER for more tests + fixes (#8080)

* Add support for local builds and Go coverage to all E2E tests
* Remove unused EXTERNAL_DB from etcd tests
* Fix private registry test
* Add coverage to the tailscale test
* Clean up unnecessary "sudo" in commands

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Add additional s3 coverage clause

Signed-off-by: Derek Nola <derek.nola@suse.com>
Derek Nola 2023-08-01 09:32:34 -07:00 committed by GitHub
parent 46cbbab263
commit a87b183f9c
26 changed files with 368 additions and 327 deletions
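
The recurring pattern across the Go suites below: each suite gains a -local flag that switches cluster creation to e2e.CreateLocalCluster (deploying the locally built K3s binary instead of an install-script download), and teardown now calls e2e.GetCoverageReport unconditionally, since that helper returns early when E2E_GOCOVER is unset. A condensed sketch of that pattern, assembled from the hunks below; the package name, suite description, and helper import path are assumptions for illustration, not part of this commit:

package example_test // placeholder; the real suites live under tests/e2e/<suite>

import (
	"flag"
	"fmt"
	"os"
	"testing"

	"github.com/k3s-io/k3s/tests/e2e" // assumed helper import path
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var (
	nodeOS      = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
	serverCount = flag.Int("serverCount", 3, "number of server nodes")
	agentCount  = flag.Int("agentCount", 1, "number of agent nodes")
	ci          = flag.Bool("ci", false, "running on CI")
	local       = flag.Bool("local", false, "deploy a locally built K3s binary")

	kubeConfigFile                  string
	serverNodeNames, agentNodeNames []string
	failed                          bool
)

func Test_E2EExample(t *testing.T) {
	flag.Parse()
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

var _ = Describe("Example suite", Ordered, func() {
	It("Starts up with no issues", func() {
		var err error
		// -local deploys the locally built binary; otherwise the install script is used
		if *local {
			serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
		} else {
			serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
		}
		Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
		kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
		Expect(err).NotTo(HaveOccurred())
	})
})

var _ = AfterEach(func() {
	// track whether any spec failed so the cluster can be kept for debugging
	failed = failed || CurrentSpecReport().Failed()
})

var _ = AfterSuite(func() {
	if failed && !*ci {
		fmt.Println("FAILED!")
	} else {
		// GetCoverageReport is a no-op unless E2E_GOCOVER is set, so it is safe to call unconditionally
		Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
		Expect(e2e.DestroyCluster()).To(Succeed())
		Expect(os.Remove(kubeConfigFile)).To(Succeed())
	}
})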

View File

@ -5,15 +5,16 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
NETWORK4_PREFIX = "10.10.10"
NETWORK6_PREFIX = "fd11:decf:c0ff:ee"
install_type = ""
def provision(vm, roles, role_num, node_num)
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = "#{roles[0]}-#{role_num}"
vm.hostname = role
node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
node_ip6_gw = "#{NETWORK6_PREFIX}::1"
@ -30,13 +31,13 @@ def provision(vm, roles, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
vm.provision "IPv6 Setup", type: "shell", path: scripts_location +"/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, vm.box.to_s]
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
if roles.include?("server") && role_num == 0
if role.include?("server") && role_num == 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
@ -53,7 +54,7 @@ def provision(vm, roles, role_num, node_num)
YAML
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
elsif roles.include?("server") && role_num != 0
elsif role.include?("server") && role_num != 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
@ -70,7 +71,7 @@ def provision(vm, roles, role_num, node_num)
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
end
if roles.include?("agent")
if role.include?("agent")
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "agent "
@ -105,10 +106,9 @@ Vagrant.configure("2") do |config|
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
roles = name.split("-", -1)
role_num = roles.pop.to_i
provision(node.vm, roles, role_num, i)
provision(node.vm, name, role_num, i)
end
end
end

View File

@ -18,6 +18,7 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
func Test_E2EDualStack(t *testing.T) {
flag.Parse()
@ -38,7 +39,11 @@ var _ = Describe("Verify DualStack Configuration", Ordered, func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
@ -193,6 +198,7 @@ var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -5,15 +5,16 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
NETWORK4_PREFIX = "10.10.10"
PUBLIC_NETWORK4_PREFIX = "10.100.100"
install_type = ""
def provision(vm, roles, role_num, node_num)
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = "#{roles[0]}-#{role_num}"
vm.hostname = role
node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
node_ip4_public = "#{PUBLIC_NETWORK4_PREFIX}.#{100+node_num}"
vm.network "private_network", :ip => node_ip4, :netmask => "255.255.255.0"
@ -24,10 +25,10 @@ def provision(vm, roles, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
if roles.include?("server") && role_num == 0
if role.include?("server") && role_num == 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
@ -41,7 +42,7 @@ def provision(vm, roles, role_num, node_num)
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
end
if roles.include?("agent")
if role.include?("agent")
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "agent "
@ -77,10 +78,9 @@ Vagrant.configure("2") do |config|
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
roles = name.split("-", -1)
role_num = roles.pop.to_i
provision(node.vm, roles, role_num, i)
provision(node.vm, name, role_num, i)
end
end
end

View File

@ -23,6 +23,7 @@ var serverCount = flag.Int("serverCount", 1, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// getLBServiceIPs returns the externalIP configured for flannel
func getExternalIPs(kubeConfigFile string) ([]string, error) {
@ -66,7 +67,11 @@ var _ = Describe("Verify External-IP config", Ordered, func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
@ -163,6 +168,7 @@ var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -5,6 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
IP_FAMILY = (ENV['E2E_IP_FAMILY'] || "ipv4")
@ -12,9 +13,9 @@ NETWORK4_PREFIX = "10.10.10"
NETWORK6_PREFIX = "fd11:decf:c0ff:ee"
install_type = ""
def provision(vm, roles, role_num, node_num)
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = "#{roles[0]}-#{role_num}"
vm.hostname = role
node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
node_ip6_gw = "#{NETWORK6_PREFIX}::1"
@ -31,13 +32,14 @@ def provision(vm, roles, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
vm.provision "IPv6 Setup", type: "shell", path: scripts_location +"/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, vm.box.to_s]
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
if roles.include?("server") && role_num == 0
if role.include?("server") && role_num == 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
@ -69,7 +71,7 @@ def provision(vm, roles, role_num, node_num)
end
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
elsif roles.include?("server") && role_num != 0
elsif role.include?("server") && role_num != 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
@ -100,7 +102,7 @@ def provision(vm, roles, role_num, node_num)
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
end
if roles.include?("agent")
if role.include?("agent")
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "agent "
@ -144,10 +146,9 @@ Vagrant.configure("2") do |config|
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
roles = name.split("-", -1)
role_num = roles.pop.to_i
provision(node.vm, roles, role_num, i)
provision(node.vm, name, role_num, i)
end
end
end

View File

@ -18,6 +18,7 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
func Test_E2EMultiClusterCIDR(t *testing.T) {
flag.Parse()
@ -36,233 +37,249 @@ var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {
It("Starts up IPv4 setup with no issues", func() {
var err error
os.Setenv("E2E_IP_FAMILY", "ipv4")
defer os.Unsetenv("E2E_IP_FAMILY")
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
When("Cluster with IPv4 only is created", func() {
It("Starts up IPv4 setup with no issues", func() {
var err error
os.Setenv("E2E_IP_FAMILY", "ipv4")
defer os.Unsetenv("E2E_IP_FAMILY")
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each node has IPv4", func() {
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.IPv4).Should(ContainSubstring("10.10.10"))
}
})
It("Verifies that each pod has IPv4", func() {
podIPs, err := e2e.GetPodIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name)
}
})
It("Add new CIDR", func() {
_, err := e2e.DeployWorkload("cluster-cidr.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("10.248.0.0"))
})
It("Restart agent-0", func() {
agents := []string{"agent-0"}
err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each pod of agent-0 has IPv4 from the new CIDR", func() {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
if pod.Node == "agent-0" {
Expect(pod.IP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
})
It("Verifies that each node has IPv4", func() {
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.IPv4).Should(ContainSubstring("10.10.10"))
}
})
It("Verifies that each pod has IPv4", func() {
podIPs, err := e2e.GetPodIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name)
}
})
It("Add new CIDR", func() {
_, err := e2e.DeployWorkload("cluster-cidr.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("10.248.0.0"))
})
It("Restart agent-0", func() {
agents := []string{"agent-0"}
err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
It("Destroy Cluster", func() {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
if os.Getenv("E2E_GOCOVER") != "" {
Expect(os.Rename("coverage.out", "coverage-ipv4.out")).To(Succeed())
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
})
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
When("Cluster with IPv6 only is created", func() {
It("Starts up IPv6 setup with no issues", func() {
var err error
os.Setenv("E2E_IP_FAMILY", "ipv6")
defer os.Unsetenv("E2E_IP_FAMILY")
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each node has IPv6", func() {
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff"))
}
})
It("Verifies that each pod has IPv6", func() {
podIPs, err := e2e.GetPodIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name)
}
})
It("Add new CIDR", func() {
_, err := e2e.DeployWorkload("cluster-cidr-ipv6.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("2001:cafe:248"))
})
It("Delete and restart agent-0", func() {
agents := []string{"agent-0"}
err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each pod of agent-0 has IPv6 from the new CIDR", func() {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
if pod.Node == "agent-0" {
Expect(pod.IP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each pod of agent-0 has IPv4 from the new CIDR", func() {
pods, err := e2e.ParsePods(kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if pod.Node == "agent-0" {
Expect(pod.IP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
}
}
})
It("Destroy Cluster", func() {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
})
It("Starts up IPv6 setup with no issues", func() {
var err error
os.Setenv("E2E_IP_FAMILY", "ipv6")
defer os.Unsetenv("E2E_IP_FAMILY")
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each node has IPv6", func() {
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff"))
}
})
It("Verifies that each pod has IPv6", func() {
podIPs, err := e2e.GetPodIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name)
}
})
It("Add new CIDR", func() {
_, err := e2e.DeployWorkload("cluster-cidr-ipv6.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("2001:cafe:248"))
})
It("Delete and restart agent-0", func() {
agents := []string{"agent-0"}
err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each pod of agent-0 has IPv6 from the new CIDR", func() {
pods, err := e2e.ParsePods(kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if pod.Node == "agent-0" {
Expect(pod.IP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
}
}
})
})
})
@ -275,6 +292,10 @@ var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
if os.Getenv("E2E_GOCOVER") != "" {
Expect(os.Rename("coverage.out", "coverage-ipv6.out")).To(Succeed())
}
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -5,7 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@ -24,6 +24,7 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "shell", inline: "ping -c 2 k3s.io"
@ -93,9 +94,6 @@ def provision(vm, role, role_num, node_num)
end
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'
if !EXTERNAL_DB.empty?
vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
end
end
end

View File

@ -22,7 +22,6 @@ var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
// E2E_REGISTRY: true/false (default: false)
@ -85,39 +84,39 @@ var _ = Describe("Verify Create", Ordered, func() {
})
It("Create new private registry", func() {
registry, err := e2e.RunCmdOnNode("sudo docker run -d -p 5000:5000 --restart=always --name registry registry:2 ", serverNodeNames[0])
registry, err := e2e.RunCmdOnNode("docker run -d -p 5000:5000 --restart=always --name registry registry:2 ", serverNodeNames[0])
fmt.Println(registry)
Expect(err).NotTo(HaveOccurred())
})
It("ensures registry is working", func() {
a, err := e2e.RunCmdOnNode("sudo docker ps -a | grep registry\n", serverNodeNames[0])
a, err := e2e.RunCmdOnNode("docker ps -a | grep registry\n", serverNodeNames[0])
fmt.Println(a)
Expect(err).NotTo(HaveOccurred())
})
It("Should pull and image from dockerhub and send it to private registry", func() {
cmd := "sudo docker pull nginx"
cmd := "docker pull nginx"
_, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
nodeIP, err := e2e.FetchNodeExternalIP(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
cmd = "sudo docker tag nginx " + nodeIP + ":5000/my-webpage"
cmd = "docker tag nginx " + nodeIP + ":5000/my-webpage"
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
cmd = "sudo docker push " + nodeIP + ":5000/my-webpage"
cmd = "docker push " + nodeIP + ":5000/my-webpage"
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
cmd = "sudo docker image remove nginx " + nodeIP + ":5000/my-webpage"
cmd = "docker image remove nginx " + nodeIP + ":5000/my-webpage"
_, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), "failed: "+cmd)
})
It("Should create and validate deployment with private registry on", func() {
res, err := e2e.RunCmdOnNode("sudo kubectl create deployment my-webpage --image=my-registry.local/my-webpage", serverNodeNames[0])
res, err := e2e.RunCmdOnNode("kubectl create deployment my-webpage --image=my-registry.local/my-webpage", serverNodeNames[0])
fmt.Println(res)
Expect(err).NotTo(HaveOccurred())
@ -131,7 +130,6 @@ var _ = Describe("Verify Create", Ordered, func() {
}
g.Expect(err).NotTo(HaveOccurred())
g.Expect(pod.Status).Should(Equal("Running"))
g.Expect(pod.Node).Should(Equal(agentNodeNames[0]))
}, "60s", "5s").Should(Succeed())
cmd := "curl " + pod.IP
@ -151,11 +149,12 @@ var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
r1, err := e2e.RunCmdOnNode("sudo docker rm -f registry", serverNodeNames[0])
r1, err := e2e.RunCmdOnNode("docker rm -f registry", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), r1)
r2, err := e2e.RunCmdOnNode("sudo kubectl delete deployment my-webpage", serverNodeNames[0])
r2, err := e2e.RunCmdOnNode("kubectl delete deployment my-webpage", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred(), r2)
Expect(err).NotTo(HaveOccurred())
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -5,6 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204', 'generic/ubuntu2204'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@ -21,6 +22,7 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults if File.exists?(vagrant_defaults)
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "shell", inline: "ping -c 2 k3s.io"

View File

@ -17,6 +17,7 @@ var nodeOS = flag.String("nodeOS", "generic/ubuntu2204", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
@ -40,7 +41,11 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() {
Context("Custom CA is rotated:", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
@ -136,6 +141,7 @@ var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -5,6 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@ -23,8 +24,9 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "shell", inline: "ping -c 2 k3s.io"
runS3mock = <<~'SCRIPT'
@ -57,9 +59,6 @@ def provision(vm, role, role_num, node_num)
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'
if !EXTERNAL_DB.empty?
vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
end
end
end

View File

@ -20,7 +20,6 @@ var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
// E2E_REGISTRY: true/false (default: false)
@ -83,17 +82,44 @@ var _ = Describe("Verify Create", Ordered, func() {
})
It("ensures s3 mock is working", func() {
a, err := e2e.RunCmdOnNode("sudo docker ps -a | grep mock\n", serverNodeNames[0])
fmt.Println(a)
res, err := e2e.RunCmdOnNode("docker ps -a | grep mock\n", serverNodeNames[0])
fmt.Println(res)
Expect(err).NotTo(HaveOccurred())
})
It("save s3 snapshot", func() {
a, err := e2e.RunCmdOnNode("sudo k3s etcd-snapshot save", serverNodeNames[0])
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).To(ContainSubstring("S3 bucket test exists"))
Expect(res).To(ContainSubstring("Uploading snapshot"))
Expect(res).To(ContainSubstring("S3 upload complete for"))
})
It("lists saved s3 snapshot", func() {
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot list", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).To(ContainSubstring("on-demand-server-0"))
})
It("save 3 more s3 snapshots", func() {
for _, i := range []string{"1", "2", "3"} {
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot save --name special-"+i, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).To(ContainSubstring("Uploading snapshot"))
Expect(res).To(ContainSubstring("S3 upload complete for special-" + i))
}
})
It("lists saved s3 snapshot", func() {
res, err := e2e.RunCmdOnNode("k3s etcd-snapshot list", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).To(ContainSubstring("on-demand-server-0"))
Expect(res).To(ContainSubstring("special-1-server-0"))
Expect(res).To(ContainSubstring("special-2-server-0"))
Expect(res).To(ContainSubstring("special-3-server-0"))
})
// TODO, there is currently a bug that prevents pruning on s3 snapshots that are not prefixed with "on-demand"
// https://github.com/rancher/rke2/issues/3714
// Once fixed, ensure that the snapshot list is actually reduced to 2
It("prunes s3 snapshots", func() {
_, err := e2e.RunCmdOnNode("k3s etcd-snapshot prune --snapshot-retention 2", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(strings.Contains(a, "S3 bucket test exists")).Should(Equal(true))
Expect(strings.Contains(a, "Uploading snapshot")).Should(Equal(true))
Expect(strings.Contains(a, "S3 upload complete for")).Should(Equal(true))
})
})
})
@ -108,6 +134,7 @@ var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -22,13 +22,11 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults if File.exists?(vagrant_defaults)
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "shell", inline: "ping -c 2 k3s.io"
if !GOCOVER.empty?
addCoverageDir(vm, role)
end
if role.include?("server") && role_num == 0
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|

View File

@ -306,12 +306,10 @@ var _ = AfterEach(func() {
})
var _ = AfterSuite(func() {
if os.Getenv("E2E_GOCOVER") != "" {
Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed())
}
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(serverNodeNames)).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -5,7 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks
@ -24,6 +24,7 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "shell", inline: "ping -c 2 k3s.io"
@ -35,10 +36,10 @@ def provision(vm, role, role_num, node_num)
k3s.args = "server "
k3s.config = <<~YAML
token: vagrant
cluster-init: true
node-external-ip: #{NETWORK_PREFIX}.100
flannel-iface: eth1
tls-san: #{NETWORK_PREFIX}.100.nip.io
#{db_type}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@ -52,7 +53,6 @@ def provision(vm, role, role_num, node_num)
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
#{db_type}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@ -67,7 +67,6 @@ def provision(vm, role, role_num, node_num)
token: vagrant
node-external-ip: #{node_ip}
flannel-iface: eth1
#{db_type}
YAML
k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}]
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@ -75,25 +74,10 @@ def provision(vm, role, role_num, node_num)
end
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'
if !EXTERNAL_DB.empty?
vm.provision "shell", inline: "docker start #{EXTERNAL_DB}"
end
end
# This step does not run by default and is designed to be called by higher level tools
end
def getDBType(role, role_num, vm)
if ( EXTERNAL_DB == "" || EXTERNAL_DB == "etcd" )
if role.include?("server") && role_num == 0
return "cluster-init: true"
end
else
puts "Unknown EXTERNAL_DB: " + EXTERNAL_DB
abort
end
return ""
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
# Default provider is libvirt, virtualbox is only provided as a backup

View File

@ -25,7 +25,6 @@ var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master)
func Test_E2ESnapshotRestore(t *testing.T) {
@ -308,6 +307,7 @@ var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -23,9 +23,12 @@ def provision(vm, role, role_num, node_num)
defaultOSConfigure(vm)
dockerInstall(vm)
addCoverageDir(vm, role, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
node_ip = "#{NETWORK_PREFIX}.#{100+node_num}"
vm.provision "shell", inline: "ping -c 2 k3s.io"
if role.include?("server")
@ -53,9 +56,6 @@ def provision(vm, role, role_num, node_num)
end
end
if !GOCOVER.empty?
addCoverageDir(vm, role)
end
if vm.box.to_s.include?("microos")
vm.provision 'k3s-reload', type: 'reload', run: 'once'

View File

@ -277,12 +277,10 @@ var _ = AfterEach(func() {
})
var _ = AfterSuite(func() {
if os.Getenv("E2E_GOCOVER") != "" {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
}
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -5,6 +5,7 @@ NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# This key must be created using tailscale web
@ -12,26 +13,26 @@ TAILSCALE_KEY = (ENV['E2E_TAILSCALE_KEY'] || "")
NETWORK4_PREFIX = "10.10.10"
install_type = ""
def provision(node, roles, role_num, node_num)
node.vm.box = NODE_BOXES[node_num]
node.vm.hostname = "#{roles[0]}-#{role_num}"
def provision(vm, roles, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = "#{roles[0]}-#{role_num}"
node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
node.vm.network "private_network", ip: node_ip4, netmask: "255.255.255.0"
vm.network "private_network", ip: node_ip4, netmask: "255.255.255.0"
scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults
defaultOSConfigure(node.vm)
defaultOSConfigure(vm)
addCoverageDir(vm, roles, GOCOVER)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
install_type = getInstallType(node.vm, RELEASE_VERSION, GITHUB_BRANCH)
node.vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
node.vm.provision "Install tailscale", type: "shell", inline: "curl -fsSL https://tailscale.com/install.sh | sh"
vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
vm.provision "Install tailscale", type: "shell", inline: "curl -fsSL https://tailscale.com/install.sh | sh"
if roles.include?("server") && role_num == 0
server_IP = nil
node.vm.provision :k3s, run: 'once' do |k3s|
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
k3s.config = <<~YAML
@ -43,7 +44,7 @@ def provision(node, roles, role_num, node_num)
end
end
if roles.include?("agent")
node.vm.provision :k3s, run: 'once' do |k3s|
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "agent "
k3s.config = <<~YAML
@ -77,7 +78,7 @@ Vagrant.configure("2") do |config|
config.vm.define name do |node|
roles = name.split("-", -1)
role_num = roles.pop.to_i
provision(node, roles, role_num, i)
provision(node.vm, roles, role_num, i)
end
end
end

View File

@ -120,6 +120,7 @@ var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -493,6 +493,9 @@ func UpgradeCluster(nodeNames []string, local bool) error {
}
func GetCoverageReport(nodeNames []string) error {
if os.Getenv("E2E_GOCOVER") == "" {
return nil
}
covDirs := []string{}
for _, nodeName := range nodeNames {
covDir := nodeName + "-cov"

View File

@ -46,10 +46,7 @@ def provision(vm, role, role_num, node_num)
if !REGISTRY.empty?
vm.provision "Set private registry", type: "shell", path: scripts_location + "/registry.sh", args: [ "#{NETWORK_PREFIX}.1" ]
end
if !GOCOVER.empty?
addCoverageDir(vm, role)
end
addCoverageDir(vm, role, GOCOVER)
if role.include?("server") && role_num == 0
vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|

View File

@ -385,12 +385,10 @@ var _ = AfterEach(func() {
})
var _ = AfterSuite(func() {
if os.Getenv("E2E_GOCOVER") != "" {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
}
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}

View File

@ -34,7 +34,10 @@ def getInstallType(vm, release_version, branch)
end
end
def addCoverageDir(vm, role)
def addCoverageDir(vm, role, gocover)
if gocover.empty?
return
end
service = role.include?("agent") ? "k3s-agent" : "k3s"
script = <<~SHELL
mkdir -p /tmp/k3scov

View File

@ -39,10 +39,7 @@ def provision(vm, role, role_num, node_num)
if !REGISTRY.empty?
vm.provision "Set private registry", type: "shell", path: scripts_location + "/registry.sh", args: [ "#{NETWORK_PREFIX}.1" ]
end
if !GOCOVER.empty?
addCoverageDir(vm, role)
end
addCoverageDir(vm, role, GOCOVER)
if role.include?("server") && role_num == 0
vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s|

View File

@ -385,12 +385,10 @@ var _ = AfterEach(func() {
})
var _ = AfterSuite(func() {
if os.Getenv("E2E_GOCOVER") != "" {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
}
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}