E2E Test Improvements (#5102)

* Fix infinite while loop on failure, reduce upgradecluster
* DRY code

Signed-off-by: Derek Nola <derek.nola@suse.com>
parent e28be2912c
commit 830c330aad
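
The recurring change in the Go test file below is a single pattern: polling blocks that asserted inside Eventually(func(g Gomega) {...}).Should(Succeed()) now return the command output and let the matcher do the work, so the retry loop, the error check, and the failure message collapse into one expression. A minimal sketch of the new pattern, assuming Ginkgo v2/Gomega and a stubbed runCommand in place of the suite's e2e.RunCommand helper:

package sketch_test

import (
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

// runCommand is an illustrative stand-in for the suite's e2e.RunCommand helper.
func runCommand(cmd string) (string, error) {
    return "pod/test-clusterip-5d8f9", nil
}

func TestSketch(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Polling Pattern Sketch")
}

var _ = Describe("DRY polling pattern", func() {
    It("retries the command until its output matches", func() {
        cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip"
        // The polled function returns (string, error): Gomega keeps retrying
        // while the error is non-nil or the matcher fails, and the annotation
        // carries the failing command into the timeout message.
        Eventually(func() (string, error) {
            return runCommand(cmd)
        }, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
    })
})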

tests/e2e/scripts/latest_commit.sh | 10 changes | Normal file → Executable file
@@ -1,7 +1,15 @@
 #!/bin/bash
 # Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
+iterations=0
 curl -s -H 'Accept: application/vnd.github.v3+json' "https://api.github.com/repos/k3s-io/k3s/commits?per_page=5&sha=$1" | jq -r '.[] | .sha' &> $2
 curl -s --fail https://storage.googleapis.com/k3s-ci-builds/k3s-$(head -n 1 $2).sha256sum
 while [ $? -ne 0 ]; do
-  sed -i 1d $2
+  ((iterations++))
+  if [ "$iterations" -ge 6 ]; then
+    echo "No valid commits found"
+    exit 1
+  fi
+  sed -i 1d "$2"
+  sleep 1
   curl -s --fail https://storage.googleapis.com/k3s-ci-builds/k3s-$(head -n 1 $2).sha256sum
 done
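
The fix above addresses the old script's failure mode: when none of the five SHAs had a passing CI build, sed -i 1d eventually ran against an empty list, head -n 1 produced an empty SHA, and the while loop spun forever on a URL that could never resolve. The patched loop gives up once the list is exhausted. The same bounded-lookup shape in Go, the language of the test suite; checkBuild is a hypothetical stand-in for the curl --fail probe against the k3s-ci-builds bucket:

package main

import (
    "errors"
    "fmt"
    "os"
    "time"
)

// checkBuild is a hypothetical stand-in for:
//   curl -s --fail https://storage.googleapis.com/k3s-ci-builds/k3s-<sha>.sha256sum
func checkBuild(sha string) error {
    return errors.New("no CI build for " + sha)
}

func main() {
    // Head of the list is the newest of the five fetched commits.
    shas := []string{"c0ffee1", "c0ffee2", "c0ffee3", "c0ffee4", "c0ffee5"}
    for _, sha := range shas { // bounded: at most len(shas) probes
        if checkBuild(sha) == nil {
            fmt.Println(sha) // first commit with a passing build
            return
        }
        time.Sleep(time.Second) // matches the script's sleep 1 between probes
    }
    fmt.Println("No valid commits found")
    os.Exit(1)
}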

tests/e2e/upgradecluster/Vagrantfile | 25 changes | vendored
@@ -17,9 +17,11 @@ def provision(vm, roles, role_num, node_num)
   # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
   vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"

-  osConfigure(vm)
+  vagrant_defaults = '../vagrantdefaults.rb'
+  load vagrant_defaults if File.exists?(vagrant_defaults)
+
+  defaultOSConfigure(vm)
+
   if !RELEASE_VERSION.empty?
     install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
   elsif RELEASE_CHANNEL == "commit"
@@ -35,14 +37,12 @@ def provision(vm, roles, role_num, node_num)

   if roles.include?("server") && role_num == 0
     vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
-      k3s.installer_url = 'https://get.k3s.io'
       k3s.args = %W[server --cluster-init --node-external-ip=#{NETWORK_PREFIX}.100 --flannel-iface=eth1]
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
     end
   elsif roles.include?("server") && role_num != 0
     vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
-      k3s.installer_url = 'https://get.k3s.io'
       k3s.args = %W[server --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -50,7 +50,6 @@ def provision(vm, roles, role_num, node_num)
     end
   if roles.include?("agent")
     vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
-      k3s.installer_url = 'https://get.k3s.io'
       k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -61,22 +60,6 @@ def provision(vm, roles, role_num, node_num)
     end
   end
-
-def osConfigure(vm)
-  if vm.box.include?("ubuntu2004")
-    vm.provision "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0", run: 'once'
-    vm.provision "shell", inline: "apt install -y jq", run: 'once'
-  end
-  if vm.box.include?("Leap")
-    vm.provision "shell", inline: "zypper install -y jq", run: 'once'
-  end
-  if vm.box.include?("microos")
-    vm.provision "shell", inline: "transactional-update pkg install -y jq", run: 'once'
-    vm.provision 'reload', run: 'once'
-  end
-end
-
 Vagrant.configure("2") do |config|
   config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
   # Default provider is libvirt, virtualbox is only provided as a backup

tests/e2e/upgradecluster/upgradecluster_test.go
@@ -77,23 +77,17 @@ var _ = Describe("Verify Upgrade", func() {

       Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")

-      Eventually(func(g Gomega) {
-        cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-        res, err := e2e.RunCommand(cmd)
-        g.Expect(err).NotTo(HaveOccurred())
-        g.Expect(res).Should(ContainSubstring("test-clusterip"))
-      }, "240s", "5s").Should(Succeed())
+      cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+      Eventually(func() (string, error) {
+        return e2e.RunCommand(cmd)
+      }, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)

       clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
-      cmd := "\"curl -L --insecure http://" + clusterip + "/name.html\""
-      fmt.Println(cmd)
+      cmd = "\"curl -L --insecure http://" + clusterip + "/name.html\""
       for _, nodeName := range serverNodenames {
-        Eventually(func(g Gomega) {
-          res, err := e2e.RunCmdOnNode(cmd, nodeName)
-          g.Expect(err).NotTo(HaveOccurred())
-          fmt.Println(res)
-          g.Expect(res).Should(ContainSubstring("test-clusterip"))
-        }, "120s", "10s").Should(Succeed())
+        Eventually(func() (string, error) {
+          return e2e.RunCmdOnNode(cmd, nodeName)
+        }, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
       }
     })

@@ -105,22 +99,18 @@ var _ = Describe("Verify Upgrade", func() {
         node_external_ip, _ := e2e.FetchNodeExternalIP(nodeName)
         cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
         nodeport, err := e2e.RunCommand(cmd)
-        Expect(err).NotTo(HaveOccurred())
-        Eventually(func(g Gomega) {
-          cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
-        }, "240s", "5s").Should(Succeed())
+        Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
+        cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+        Eventually(func() (string, error) {
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")

         cmd = "curl -L --insecure http://" + node_external_ip + ":" + nodeport + "/name.html"
         fmt.Println(cmd)
-        Eventually(func(g Gomega) {
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          fmt.Println(res)
-          g.Expect(res).Should(ContainSubstring("test-nodeport"))
-        }, "240s", "5s").Should(Succeed())
+        Eventually(func() (string, error) {
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "failed cmd: "+cmd)
       }
     })

@@ -132,21 +122,16 @@ var _ = Describe("Verify Upgrade", func() {
         cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
         port, err := e2e.RunCommand(cmd)
         Expect(err).NotTo(HaveOccurred())
-        Eventually(func(g Gomega) {
-          cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
-        }, "240s", "5s").Should(Succeed())
-        Eventually(func(g Gomega) {
-          cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
-          fmt.Println(cmd)
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          fmt.Println(res)
-          g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
-        }, "240s", "5s").Should(Succeed())
+        cmd = "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
+        Eventually(func() (string, error) {
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
+        cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
+        Eventually(func() (string, error) {
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"), "failed cmd: "+cmd)
       }
     })

@@ -157,14 +142,9 @@ var _ = Describe("Verify Upgrade", func() {
       for _, nodeName := range serverNodenames {
         ip, _ := e2e.FetchNodeExternalIP(nodeName)
         cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
-        fmt.Println(cmd)
-
-        Eventually(func(g Gomega) {
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          fmt.Println(res)
-          g.Expect(res).Should(ContainSubstring("test-ingress"))
-        }, "240s", "5s").Should(Succeed())
+        Eventually(func() (string, error) {
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-ingress"), "failed cmd: "+cmd)
       }
     })

@@ -189,21 +169,15 @@ var _ = Describe("Verify Upgrade", func() {
       _, err := e2e.DeployWorkload("dnsutils.yaml", kubeConfigFile, false)
       Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")

-      Eventually(func(g Gomega) {
+      Eventually(func() (string, error) {
         cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
-        res, _ := e2e.RunCommand(cmd)
-        fmt.Println(res)
-        g.Expect(res).Should(ContainSubstring("dnsutils"))
-      }, "420s", "2s").Should(Succeed())
+        return e2e.RunCommand(cmd)
+      }, "420s", "2s").Should(ContainSubstring("dnsutils"))

-      Eventually(func(g Gomega) {
-        cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
-        fmt.Println(cmd)
-        res, err := e2e.RunCommand(cmd)
-        g.Expect(err).NotTo(HaveOccurred())
-        fmt.Println(res)
-        g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
-      }, "420s", "2s").Should(Succeed())
+      cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
+      Eventually(func() (string, error) {
+        return e2e.RunCommand(cmd)
+      }, "420s", "2s").Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
     })

     It("Verifies Local Path Provisioner storage ", func() {
@@ -241,12 +215,10 @@ var _ = Describe("Verify Upgrade", func() {
       _, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, false)
       Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")

-      Eventually(func(g Gomega) {
+      Eventually(func() (string, error) {
         cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
-        res, _ := e2e.RunCommand(cmd)
-        fmt.Println(res)
-        g.Expect(res).Should(ContainSubstring("local-path-provisioner"))
-      }, "420s", "2s").Should(Succeed())
+        return e2e.RunCommand(cmd)
+      }, "420s", "2s").Should(ContainSubstring("local-path-provisioner"))

       Eventually(func(g Gomega) {
         cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
@@ -257,13 +229,11 @@ var _ = Describe("Verify Upgrade", func() {
         g.Expect(res).Should(ContainSubstring("Running"))
       }, "420s", "2s").Should(Succeed())

-      Eventually(func(g Gomega) {
+      // Check data after re-creation
+      Eventually(func() (string, error) {
         cmd = "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
-        res, err = e2e.RunCommand(cmd)
-        g.Expect(err).NotTo(HaveOccurred())
-        fmt.Println("Data after re-creation", res)
-        g.Expect(res).Should(ContainSubstring("local-path-test"))
-      }, "180s", "2s").Should(Succeed())
+        return e2e.RunCommand(cmd)
+      }, "180s", "2s").Should(ContainSubstring("local-path-test"))
     })

     It("Upgrades with no issues", func() {
@@ -285,7 +255,7 @@ var _ = Describe("Verify Upgrade", func() {
           g.Expect(node.Status).Should(Equal("Ready"))
         }
       }, "420s", "5s").Should(Succeed())
-      _, _ = e2e.ParseNodes(kubeConfigFile, true)
+      e2e.ParseNodes(kubeConfigFile, true)

       fmt.Printf("\nFetching Pods status\n")
       Eventually(func(g Gomega) {
@@ -299,27 +269,22 @@ var _ = Describe("Verify Upgrade", func() {
           }
         }
       }, "420s", "5s").Should(Succeed())
-      _, _ = e2e.ParsePods(kubeConfigFile, true)
+      e2e.ParsePods(kubeConfigFile, true)
     })

     It("After upgrade verifies ClusterIP Service", func() {
-      Eventually(func(g Gomega) {
+      Eventually(func() (string, error) {
         cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-        res, err := e2e.RunCommand(cmd)
-        g.Expect(err).NotTo(HaveOccurred())
-        g.Expect(res).Should(ContainSubstring("test-clusterip"))
-      }, "420s", "5s").Should(Succeed())
+        return e2e.RunCommand(cmd)
+      }, "420s", "5s").Should(ContainSubstring("test-clusterip"))

       clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
       cmd := "\"curl -L --insecure http://" + clusterip + "/name.html\""
       fmt.Println(cmd)
       for _, nodeName := range serverNodenames {
-        Eventually(func(g Gomega) {
-          res, err := e2e.RunCmdOnNode(cmd, nodeName)
-          g.Expect(err).NotTo(HaveOccurred())
-          fmt.Println(res)
-          g.Expect(res).Should(ContainSubstring("test-clusterip"))
-        }, "120s", "10s").Should(Succeed())
+        Eventually(func() (string, error) {
+          return e2e.RunCmdOnNode(cmd, nodeName)
+        }, "120s", "10s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
       }
     })

@@ -331,21 +296,16 @@ var _ = Describe("Verify Upgrade", func() {
         nodeport, err := e2e.RunCommand(cmd)
         Expect(err).NotTo(HaveOccurred())

-        Eventually(func(g Gomega) {
+        Eventually(func() (string, error) {
           cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          g.Expect(res).Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")
-        }, "240s", "5s").Should(Succeed())
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-nodeport"), "nodeport pod was not created")

         cmd = "curl -L --insecure http://" + node_external_ip + ":" + nodeport + "/name.html"
         fmt.Println(cmd)
-        Eventually(func(g Gomega) {
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          fmt.Println(res)
-          g.Expect(res).Should(ContainSubstring("test-nodeport"))
-        }, "240s", "5s").Should(Succeed())
+        Eventually(func() (string, error) {
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-nodeport"))
       }
     })

@@ -355,21 +315,15 @@ var _ = Describe("Verify Upgrade", func() {
         cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
         port, err := e2e.RunCommand(cmd)
         Expect(err).NotTo(HaveOccurred())
-        Eventually(func(g Gomega) {
-          cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
-          fmt.Println(cmd)
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          fmt.Println(res)
-          g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
-        }, "240s", "5s").Should(Succeed())
+        Eventually(func() (string, error) {
+          cmd := "curl -L --insecure http://" + ip + ":" + port + "/name.html"
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))

-        Eventually(func(g Gomega) {
+        Eventually(func() (string, error) {
           cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
-        }, "240s", "5s").Should(Succeed())
+          return e2e.RunCommand(cmd)
+        }, "240s", "5s").Should(ContainSubstring("test-loadbalancer"))
       }
     })

@@ -379,12 +333,9 @@ var _ = Describe("Verify Upgrade", func() {
         cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
         fmt.Println(cmd)

-        Eventually(func(g Gomega) {
-          res, err := e2e.RunCommand(cmd)
-          g.Expect(err).NotTo(HaveOccurred())
-          fmt.Println(res)
-          g.Expect(res).Should(ContainSubstring("test-ingress"))
-        }, "420s", "5s").Should(Succeed())
+        Eventually(func() (string, error) {
+          return e2e.RunCommand(cmd)
+        }, "420s", "5s").Should(ContainSubstring("test-ingress"))
       }
     })

@@ -402,24 +353,17 @@ var _ = Describe("Verify Upgrade", func() {
       }, "420s", "1s").Should(Succeed())
     })
     It("After upgrade verifies dns access", func() {
-      Eventually(func(g Gomega) {
+      Eventually(func() (string, error) {
         cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"
-        fmt.Println(cmd)
-        res, err := e2e.RunCommand(cmd)
-        g.Expect(err).NotTo(HaveOccurred())
-        fmt.Println(res)
-        g.Expect(res).Should((ContainSubstring("kubernetes.default.svc.cluster.local")))
-      }, "180s", "2s").Should(Succeed())
+        return e2e.RunCommand(cmd)
+      }, "180s", "2s").Should((ContainSubstring("kubernetes.default.svc.cluster.local")))
     })

     It("After upgrade verify Local Path Provisioner storage ", func() {
-      Eventually(func(g Gomega) {
+      Eventually(func() (string, error) {
         cmd := "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
-        res, err := e2e.RunCommand(cmd)
-        g.Expect(err).NotTo(HaveOccurred())
-        fmt.Println("Data after re-creation", res)
-        g.Expect(res).Should(ContainSubstring("local-path-test"))
-      }, "180s", "2s").Should(Succeed())
+        return e2e.RunCommand(cmd)
+      }, "180s", "2s").Should(ContainSubstring("local-path-test"))
     })
   })
 })

tests/e2e/vagrantdefaults.rb | 13 changes | new file
@@ -0,0 +1,13 @@
+def defaultOSConfigure(vm)
+  if vm.box.include?("ubuntu2004")
+    vm.provision "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0"
+    vm.provision "shell", inline: "apt install -y jq"
+  end
+  if vm.box.include?("Leap")
+    vm.provision "shell", inline: "zypper install -y jq"
+  end
+  if vm.box.include?("microos")
+    vm.provision "shell", inline: "transactional-update pkg install -y jq"
+    vm.provision 'reload', run: 'once'
+  end
+end

tests/e2e/validatecluster/Vagrantfile | 24 changes | vendored
@@ -17,7 +17,10 @@ def provision(vm, roles, role_num, node_num)
   # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
   vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"

-  osConfigure(vm)
+  vagrant_defaults = '../vagrantdefaults.rb'
+  load vagrant_defaults if File.exists?(vagrant_defaults)
+
+  defaultOSConfigure(vm)
+
   if !RELEASE_VERSION.empty?
     install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
@@ -30,14 +33,12 @@ def provision(vm, roles, role_num, node_num)

   if roles.include?("server") && role_num == 0
     vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
-      k3s.installer_url = 'https://get.k3s.io'
       k3s.args = %W[server --cluster-init --node-external-ip=#{NETWORK_PREFIX}.100 --flannel-iface=eth1]
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
     end
   elsif roles.include?("server") && role_num != 0
     vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
-      k3s.installer_url = 'https://get.k3s.io'
       k3s.args = %W[server --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -45,7 +46,6 @@ def provision(vm, roles, role_num, node_num)
     end
   if roles.include?("agent")
     vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
-      k3s.installer_url = 'https://get.k3s.io'
       k3s.args = %W[agent --server https://#{NETWORK_PREFIX}.100:6443 --flannel-iface=eth1]
       k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}]
       k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
@@ -56,22 +56,6 @@ def provision(vm, roles, role_num, node_num)
     end
   end
-
-def osConfigure(vm)
-
-  if vm.box.include?("ubuntu2004")
-    vm.provision "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0"
-    vm.provision "shell", inline: "apt install -y jq"
-  end
-  if vm.box.include?("Leap")
-    vm.provision "shell", inline: "zypper install -y jq"
-  end
-  if vm.box.include?("microos")
-    vm.provision "shell", inline: "transactional-update pkg install -y jq"
-    vm.provision 'reload', run: 'once'
-  end
-end
-
 Vagrant.configure("2") do |config|
   config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
   # Default provider is libvirt, virtualbox is only provided as a backup