E2E: Dualstack test (#5617)

* E2E dualstack test
* Improve testing documentation

Signed-off-by: Derek Nola <derek.nola@suse.com>
Derek Nola 2022-06-14 08:40:29 -07:00 committed by GitHub
parent d4522de06a
commit 12695cea15
15 changed files with 467 additions and 38 deletions

View File

@ -42,4 +42,10 @@ To run all the E2E tests and generate JUnit testing reports:
ginkgo --junit-report=result.xml ./tests/e2e/...
```
Note: The `go test` default timeout is 10 minutes, so the `-timeout` flag should be used. The `ginkgo` default timeout is 1 hour, so no timeout flag is needed.
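For example, a single suite can be run through `go test` with an explicit timeout; the 30-minute value and the `tests/e2e/dualstack` path below are illustrative (the path assumes the dualstack suite lives alongside its Vagrantfile):
```bash
# The default 10 minute `go test` timeout is too short for cluster provisioning.
go test -timeout=30m ./tests/e2e/dualstack/ -run Test_E2EDualStack
```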
# Debugging
In the event of a test failure, the cluster and VMs are left in their broken state for inspection. Startup logs are retained in `vagrant.log`.
To see a list of nodes: `vagrant status`
To ssh into a node: `vagrant ssh <NODE>`
Once you are done debugging, or are ready to rerun the test, use `vagrant destroy -f` to remove the broken cluster.

View File

@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ds-clusterip-pod
spec:
selector:
matchLabels:
k8s-app: nginx-app-clusterip
replicas: 2
template:
metadata:
labels:
k8s-app: nginx-app-clusterip
spec:
containers:
- name: nginx
image: ranchertest/mytestcontainer
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: nginx-app-clusterip
name: ds-clusterip-svc
namespace: default
spec:
type: ClusterIP
ipFamilyPolicy: PreferDualStack
ports:
- protocol: TCP
port: 80
targetPort: 80
selector:
k8s-app: nginx-app-clusterip
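Once this manifest is applied, the dual-stack assignment can be checked by hand with the same jsonpath query that `FetchClusterIP` (further down) uses when `dualStack` is true; with `PreferDualStack` honored, the service should list one IPv4 and one IPv6 address from the configured service CIDRs:
```bash
# Illustrative output: ["10.43.x.y","2001:cafe:42:1::z"]
kubectl get svc ds-clusterip-svc -o jsonpath='{.spec.clusterIPs}'
```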

View File

@ -0,0 +1,16 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ds-ingress
spec:
rules:
- host: testds.com
http:
paths:
- backend:
service:
# Reliant on dualstack_clusterip.yaml
name: ds-clusterip-svc
port:
number: 80
pathType: ImplementationSpecific
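The Go suite below exercises this ingress by curling every node with the `testds.com` host header; a manual spot check against the first server's addresses from the Vagrantfile looks roughly like this, and both requests should return the name of a backing `ds-clusterip-pod`:
```bash
curl --header host:testds.com http://10.10.10.100/name.html
curl --header host:testds.com http://[a11:decf:c0ff:ee::10]/name.html
```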

View File

@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ds-nodeport-pod
spec:
selector:
matchLabels:
k8s-app: nginx-app-nodeport
replicas: 2
template:
metadata:
labels:
k8s-app: nginx-app-nodeport
spec:
containers:
- name: nginx
image: ranchertest/mytestcontainer
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: nginx-app-nodeport
name: ds-nodeport-svc
namespace: default
spec:
type: NodePort
ipFamilyPolicy: PreferDualStack
ports:
- port: 80
nodePort: 30096
name: http
selector:
k8s-app: nginx-app-nodeport
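The suite checks the NodePort service the same way on both address families, first looking up the assigned port (pinned to 30096 in this manifest). A manual equivalent against the first server, with the generated kubeconfig in hand:
```bash
kubectl get service ds-nodeport-svc --output jsonpath="{.spec.ports[0].nodePort}"
curl -L --insecure http://10.10.10.100:30096/name.html
curl -L --insecure http://[a11:decf:c0ff:ee::10]:30096/name.html
```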

tests/e2e/dualstack/Vagrantfile vendored Normal file
View File

@ -0,0 +1,111 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0", "server-1", "server-2", "agent-0" ])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
NETWORK4_PREFIX = "10.10.10"
NETWORK6_PREFIX = "a11:decf:c0ff:ee"
install_type = ""
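# provision brings up one node: it attaches a dual-stack private network on eth1,
# runs the IPv6 setup script, then installs k3s as the bootstrap server (cluster-init),
# a joining server, or an agent, depending on the node's role.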
def provision(vm, roles, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = "#{roles[0]}-#{role_num}"
node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
# Only works with libvirt, which allows IPv4 + IPv6 on a single network/interface
vm.network "private_network",
:ip => node_ip4,
:netmask => "255.255.255.0",
:libvirt__dhcp_enabled => false,
:libvirt__forward_mode => "none",
:libvirt__guest_ipv6 => "yes",
:libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
:libvirt__ipv6_prefix => "64"
vagrant_defaults = '../vagrantdefaults.rb'
load vagrant_defaults if File.exists?(vagrant_defaults)
defaultOSConfigure(vm)
vm.provision "IPv6 Setup", type: "shell", path: "../scripts/ipv6.sh", args: [node_ip4, node_ip6, vm.box]
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
if roles.include?("server") && role_num == 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
k3s.config = <<~YAML
node-external-ip: #{node_ip4},#{node_ip6}
node-ip: #{node_ip4},#{node_ip6}
cluster-init: true
token: vagrant
cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56
service-cidr: 10.43.0.0/16,2001:cafe:42:1::/112
bind-address: #{NETWORK4_PREFIX}.100
flannel-iface: eth1
YAML
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
elsif roles.include?("server") && role_num != 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
k3s.config = <<~YAML
node-external-ip: #{node_ip4},#{node_ip6}
node-ip: #{node_ip4},#{node_ip6}
server: https://#{NETWORK4_PREFIX}.100:6443
token: vagrant
cluster-cidr: 10.42.0.0/16,2001:cafe:42:0::/56
service-cidr: 10.43.0.0/16,2001:cafe:42:1::/112
flannel-iface: eth1
YAML
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
end
if roles.include?("agent")
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "agent "
k3s.config = <<~YAML
node-external-ip: #{node_ip4},#{node_ip6}
node-ip: #{node_ip4},#{node_ip6}
server: https://#{NETWORK4_PREFIX}.100:6443
token: vagrant
flannel-iface: eth1
YAML
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
end
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-libvirt"]
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end
# Must iterate on the index, vagrant does not understand iterating
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
config.vm.define name do |node|
roles = name.split("-", -1)
role_num = roles.pop.to_i
provision(node.vm, roles, role_num, i)
end
end
end
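The node layout is driven entirely by the `E2E_*` environment variables at the top of the file (role and box lists are space-separated when passed as strings), so the cluster can also be brought up by hand for debugging. A minimal sketch, assuming a working libvirt setup:
```bash
# Defaults repeated explicitly; run from tests/e2e/dualstack/.
E2E_NODE_ROLES="server-0 server-1 server-2 agent-0" vagrant up --provider=libvirt
```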

View File

@ -0,0 +1,213 @@
package validatecluster
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 0, "number of agent nodes")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s1 or nil for latest commit from master
type objIP struct {
name string
ipv4 string
ipv6 string
}
func getPodIPs(kubeConfigFile string) ([]objIP, error) {
cmd := `kubectl get pods -A -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.podIPs[*].ip}{"\n"}{end}' --kubeconfig=` + kubeConfigFile
return getObjIPs(cmd)
}
func getNodeIPs(kubeConfigFile string) ([]objIP, error) {
cmd := `kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{" "}{.status.addresses[?(@.type == "InternalIP")].address}{"\n"}{end}' --kubeconfig=` + kubeConfigFile
return getObjIPs(cmd)
}
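// getObjIPs runs a kubectl command whose output is one "<name> <ipv4> <ipv6>" line per
// object and collects the results; objects reporting fewer addresses get empty fields.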
func getObjIPs(cmd string) ([]objIP, error) {
var objIPs []objIP
res, err := e2e.RunCommand(cmd)
if err != nil {
return nil, err
}
objs := strings.Split(res, "\n")
objs = objs[:len(objs)-1]
for _, obj := range objs {
fields := strings.Fields(obj)
if len(fields) > 2 {
objIPs = append(objIPs, objIP{name: fields[0], ipv4: fields[1], ipv6: fields[2]})
} else if len(fields) > 1 {
objIPs = append(objIPs, objIP{name: fields[0], ipv4: fields[1]})
} else {
objIPs = append(objIPs, objIP{name: fields[0]})
}
}
return objIPs, nil
}
func Test_E2EDualStack(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
RunSpecs(t, "Validate DualStack Suite")
}
var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var _ = Describe("Verify DualStack Configuration", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog())
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each node has IPv4 and IPv6", func() {
nodeIPs, err := getNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.ipv4).Should(ContainSubstring("10.10.10"))
Expect(node.ipv6).Should(ContainSubstring("a11:decf:c0ff"))
}
})
It("Verifies that each pod has IPv4 and IPv6", func() {
podIPs, err := getPodIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.ipv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.name)
Expect(pod.ipv6).Should(Or(ContainSubstring("a11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.name)
}
})
It("Verifies ClusterIP Service", func() {
_, err := e2e.DeployWorkload("dualstack_clusterip.yaml", kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("ds-clusterip-pod"))
// Checks both IPv4 and IPv6
clusterips, err := e2e.FetchClusterIP(kubeConfigFile, "ds-clusterip-svc", true)
Expect(err).NotTo(HaveOccurred())
for _, ip := range strings.Split(clusterips, ",") {
if strings.Contains(ip, "::") {
ip = "[" + ip + "]"
}
pods, err := e2e.ParsePods(kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if !strings.HasPrefix(pod.Name, "ds-clusterip-pod") {
continue
}
cmd := fmt.Sprintf("curl -L --insecure http://%s", ip)
Eventually(func() (string, error) {
return e2e.RunCmdOnNode(cmd, serverNodeNames[0])
}, "60s", "5s").Should(ContainSubstring("Welcome to nginx!"), "failed cmd: "+cmd)
}
}
})
It("Verifies Ingress", func() {
_, err := e2e.DeployWorkload("dualstack_ingress.yaml", kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
cmd := "kubectl get ingress ds-ingress --kubeconfig=" + kubeConfigFile + " -o jsonpath=\"{.spec.rules[*].host}\""
hostName, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
nodeIPs, err := getNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
for _, node := range nodeIPs {
cmd := fmt.Sprintf("curl --header host:%s http://%s/name.html", hostName, node.ipv4)
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "2s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
cmd = fmt.Sprintf("curl --header host:%s http://[%s]/name.html", hostName, node.ipv6)
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "5s", "1s").Should(ContainSubstring("ds-clusterip-pod"), "failed cmd: "+cmd)
}
})
It("Verifies NodePort Service", func() {
_, err := e2e.DeployWorkload("dualstack_nodeport.yaml", kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())
cmd := "kubectl get service ds-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := e2e.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd)
nodeIPs, err := getNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
cmd = "curl -L --insecure http://" + node.ipv4 + ":" + nodeport + "/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
cmd = "curl -L --insecure http://[" + node.ipv6 + "]:" + nodeport + "/name.html"
Eventually(func() (string, error) {
return e2e.RunCommand(cmd)
}, "10s", "1s").Should(ContainSubstring("ds-nodeport-pod"), "failed cmd: "+cmd)
}
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentGinkgoTestDescription().Failed
})
var _ = AfterSuite(func() {
if failed {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})

tests/e2e/scripts/ipv6.sh Normal file
View File

@ -0,0 +1,16 @@
#!/bin/bash
ip4_addr=$1
ip6_addr=$2
os=$3
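# Re-enable IPv6 and turn off duplicate address detection on eth1, then assign the
# static addresses (via netplan on Ubuntu, with a raw ip command elsewhere).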
sysctl -w net.ipv6.conf.all.disable_ipv6=0
sysctl -w net.ipv6.conf.eth1.accept_dad=0
if [ -z "${os##*ubuntu*}" ]; then
netplan set ethernets.eth1.accept-ra=false
netplan set ethernets.eth1.addresses=["$ip4_addr"/24,"$ip6_addr"/64]
netplan apply
else
ip -6 addr add "$ip6_addr"/64 dev eth1
fi
ip addr show dev eth1

View File

@ -21,14 +21,8 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults if File.exists?(vagrant_defaults)
defaultOSConfigure(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
if !RELEASE_VERSION.empty?
install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
else
# Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
vm.provision "shell", path: "../scripts/latest_commit.sh", args: [GITHUB_BRANCH, "/tmp/k3s_commits"]
install_type = "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /tmp/k3s_commits)"
end
vm.provision "shell", inline: "ping -c 2 k3s.io"
if role.include?("server") && role_num == 0

View File

@ -10,19 +10,6 @@ NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 1024).to_i
# Virtualbox >= 6.1.28 requires `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
def installType(vm)
if RELEASE_VERSION == "skip"
return "INSTALL_K3S_SKIP_DOWNLOAD=true"
elsif !RELEASE_VERSION.empty?
return "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
end
# Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
# MicroOS requires it not be in a /tmp/ or other root system folder
scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
vm.provision "Acquire latest commit", type: "shell", path: scripts_location + "/latest_commit.sh", args: [GITHUB_BRANCH, "/home/vagrant/k3s_commits"]
return "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /home/vagrant/k3s_commits)"
end
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = role
@ -34,7 +21,8 @@ def provision(vm, role, role_num, node_num)
defaultOSConfigure(vm)
install_type = installType(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "ping k3s.io", type: "shell", inline: "ping -c 2 k3s.io"
if node_num == 0 && !role.include?("server") && !role.include?("etcd")

View File

@ -119,7 +119,7 @@ var _ = Describe("Verify Create", func() {
return e2e.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
cmd = "curl -L --insecure http://" + clusterip + "/name.html"
for _, nodeName := range cpNodeNames {
Eventually(func() (string, error) {

View File

@ -97,7 +97,16 @@ func DestroyCluster() error {
return os.Remove("vagrant.log")
}
func FetchClusterIP(kubeconfig string, servicename string) (string, error) {
func FetchClusterIP(kubeconfig string, servicename string, dualStack bool) (string, error) {
if dualStack {
cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIPs}' --kubeconfig=" + kubeconfig
res, err := RunCommand(cmd)
if err != nil {
return res, err
}
res = strings.ReplaceAll(res, "\"", "")
return strings.Trim(res, "[]"), nil
}
cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + kubeconfig
return RunCommand(cmd)
}

View File

@ -84,7 +84,7 @@ var _ = Describe("Verify Upgrade", func() {
return e2e.RunCommand(cmd)
}, "240s", "5s").Should(ContainSubstring("test-clusterip"), "failed cmd: "+cmd)
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
cmd = "curl -L --insecure http://" + clusterip + "/name.html"
for _, nodeName := range serverNodeNames {
Eventually(func() (string, error) {
@ -280,7 +280,7 @@ var _ = Describe("Verify Upgrade", func() {
return e2e.RunCommand(cmd)
}, "420s", "5s").Should(ContainSubstring("test-clusterip"))
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
cmd := "curl -L --insecure http://" + clusterip + "/name.html"
fmt.Println(cmd)
for _, nodeName := range serverNodeNames {

View File

@ -1,7 +1,7 @@
def defaultOSConfigure(vm)
box = vm.box.to_s
if box.include?("generic/ubuntu")
vm.provision "Set DNS", type: "shell", inline: "systemd-resolve --set-dns=8.8.8.8 --interface=eth0"
vm.provision "Set DNS", type: "shell", inline: "netplan set ethernets.eth0.nameservers.addresses=[8.8.8.8,1.1.1.1]; netplan apply", run: 'once'
vm.provision "Install jq", type: "shell", inline: "apt install -y jq"
elsif box.include?("Leap") || box.include?("Tumbleweed")
vm.provision "Install jq", type: "shell", inline: "zypper install -y jq"
@ -11,4 +11,17 @@ def defaultOSConfigure(vm)
vm.provision "Install jq", type: "shell", inline: "transactional-update pkg install -y jq"
vm.provision 'reload', run: 'once'
end
end
def getInstallType(vm, release_version, branch)
if release_version == "skip"
install_type = "INSTALL_K3S_SKIP_DOWNLOAD=true"
elsif !release_version.empty?
return "INSTALL_K3S_VERSION=#{release_version}"
else
# Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
# MicroOS requires it not be in a /tmp/ or other root system folder
vm.provision "Get latest commit", type: "shell", path: "../scripts/latest_commit.sh", args: [branch, "/tmp/k3s_commits"]
return "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /tmp/k3s_commits)"
end
end

View File

@ -27,17 +27,8 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
if RELEASE_VERSION == "skip"
install_type = "INSTALL_K3S_SKIP_DOWNLOAD=true"
elsif !RELEASE_VERSION.empty?
install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
else
# Grabs the last 5 commit SHA's from the given branch, then purges any commits that do not have a passing CI build
# MicroOS requires it not be in a /tmp/ or other root system folder
vm.provision "Get latest commit", type: "shell", path: scripts_location + "/latest_commit.sh", args: [GITHUB_BRANCH, "/home/vagrant/k3s_commits"]
install_type = "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /home/vagrant/k3s_commits)"
end
vm.provision "shell", inline: "ping -c 2 k3s.io"
db_type = getDBType(role, role_num, vm)

View File

@ -84,7 +84,7 @@ var _ = Describe("Verify Create", func() {
g.Expect(res).Should((ContainSubstring("test-clusterip")))
}, "240s", "5s").Should(Succeed())
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
cmd := "curl -L --insecure http://" + clusterip + "/name.html"
fmt.Println(cmd)
for _, nodeName := range serverNodeNames {