Enable E2E testing with local k3s binary

Signed-off-by: Derek Nola <derek.nola@suse.com>
Derek Nola 2022-09-08 14:31:50 -07:00
parent f88cd3bfb6
commit 1972fb7cd6
3 changed files with 54 additions and 18 deletions


@@ -26,6 +26,10 @@ spec:
       mountPath: /data
     ports:
     - containerPort: 80
+  # This is only used if the regcred secret is created
+  # which increases the dockerhub pull rate limit
+  imagePullSecrets:
+  - name: regcred
   volumes:
   - name: volv
     persistentVolumeClaim:
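
Note: the added imagePullSecrets stanza only takes effect once a secret named regcred exists in the namespace; it is created by the DockerLogin helper changed later in this commit. The snippet below is a minimal sketch of the same reference expressed with the Go API types from k8s.io/api, for illustration only; the container name and image are placeholders, not values from this manifest.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Equivalent of the manifest's added stanza: the pod asks the kubelet to
	// use the "regcred" secret when pulling images for its containers.
	spec := corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:  "volume-test", // placeholder container name
			Image: "nginx",       // placeholder image
			Ports: []corev1.ContainerPort{{ContainerPort: 80}},
		}},
		ImagePullSecrets: []corev1.LocalObjectReference{{Name: "regcred"}},
	}
	fmt.Println("pull secret:", spec.ImagePullSecrets[0].Name)
}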


@@ -128,6 +128,8 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
 	serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
 	var testOptions string
+	var cmd string
 	for _, env := range os.Environ() {
 		if strings.HasPrefix(env, "E2E_") {
 			testOptions += " " + env
@@ -135,14 +137,27 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
 	}
 	testOptions += " E2E_RELEASE_VERSION=skip"
-	cmd := fmt.Sprintf(`%s vagrant up --no-provision &> vagrant.log`, nodeEnvs)
-	if _, err := RunCommand(cmd); err != nil {
-		return nil, nil, fmt.Errorf("failed creating nodes: %s: %v", cmd, err)
-	}
-	nodeRoles := append(serverNodeNames, agentNodeNames...)
-	for _, node := range nodeRoles {
+	// Bring up all of the nodes in parallel
+	errg, _ := errgroup.WithContext(context.Background())
+	for i, node := range append(serverNodeNames, agentNodeNames...) {
+		if i == 0 {
+			cmd = fmt.Sprintf(`%s %s vagrant up --no-provision %s &> vagrant.log`, nodeEnvs, testOptions, node)
+		} else {
+			cmd = fmt.Sprintf(`%s %s vagrant up --no-provision %s &>> vagrant.log`, nodeEnvs, testOptions, node)
+		}
+		errg.Go(func() error {
+			if _, err := RunCommand(cmd); err != nil {
+				return fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
+			}
+			return nil
+		})
+		// libVirt/Virtualbox needs some time between provisioning nodes
+		time.Sleep(10 * time.Second)
+	}
+	if err := errg.Wait(); err != nil {
+		return nil, nil, err
+	}
+	for _, node := range append(serverNodeNames, agentNodeNames...) {
 		cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
 		if _, err := RunCommand(cmd); err != nil {
 			return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
@@ -152,9 +167,21 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
 		}
 	}
-	cmd = fmt.Sprintf(`%s %s vagrant provision &>> vagrant.log`, nodeEnvs, testOptions)
-	if _, err := RunCommand(cmd); err != nil {
-		return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
-	}
+	// Install K3s on all nodes in parallel
+	errg, _ = errgroup.WithContext(context.Background())
+	for _, node := range append(serverNodeNames, agentNodeNames...) {
+		cmd = fmt.Sprintf(`%s %s vagrant provision %s &>> vagrant.log`, nodeEnvs, testOptions, node)
+		errg.Go(func() error {
+			if _, err := RunCommand(cmd); err != nil {
+				return fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
+			}
+			return nil
+		})
+		// K3s needs some time between joining nodes to avoid learner issues
+		time.Sleep(20 * time.Second)
+	}
+	if err := errg.Wait(); err != nil {
+		return nil, nil, err
+	}
 	return serverNodeNames, agentNodeNames, nil
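
Note: both loops above (the vagrant up bring-up and the vagrant provision install) share the same fan-out shape: one goroutine per node queued with errgroup.Group.Go, a sleep to stagger the starts, and errgroup.Group.Wait to surface the first failure. The standalone sketch below only illustrates that shape; the node names and shell command are placeholders, and exec.Command stands in for the suite's RunCommand helper.

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	nodes := []string{"server-0", "server-1", "agent-0"} // hypothetical node names
	errg, _ := errgroup.WithContext(context.Background())
	for _, node := range nodes {
		node := node // capture per iteration (needed before Go 1.22)
		cmd := fmt.Sprintf("vagrant up --no-provision %s", node) // placeholder command
		errg.Go(func() error {
			if out, err := exec.Command("bash", "-c", cmd).CombinedOutput(); err != nil {
				return fmt.Errorf("node %s failed: %v: %s", node, err, out)
			}
			return nil
		})
		// Stagger start-up, mirroring the sleeps above that keep the
		// hypervisor and the etcd join sequence from being hit all at once.
		time.Sleep(10 * time.Second)
	}
	if err := errg.Wait(); err != nil {
		fmt.Println("provisioning failed:", err)
	}
}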
@@ -334,7 +361,7 @@ func DockerLogin(kubeConfig string, ci bool) error {
 		return nil
 	}
 	// Authenticate to docker hub to increase pull limit
-	cmd := fmt.Sprintf("kubectl create secret docker-registry regcred --from-file=%s --type=kubernetes.io/dockerconfigjson --kubeconfig=%s",
+	cmd := fmt.Sprintf("kubectl create secret docker-registry regcred --from-file=.dockerconfigjson=%s --kubeconfig=%s",
 		"../amd64_resource_files/docker_cred.json", kubeConfig)
 	res, err := RunCommand(cmd)
 	if err != nil {
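
Note: the corrected kubectl invocation names the data key explicitly because a secret of type kubernetes.io/dockerconfigjson stores its credentials under the fixed key .dockerconfigjson. The sketch below shows the object the fixed command produces, expressed with the k8s.io/api constants; it is an illustration only and not part of this commit.

package main

import (
	"fmt"
	"os"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Read the same credential file the test helper points kubectl at.
	data, err := os.ReadFile("../amd64_resource_files/docker_cred.json")
	if err != nil {
		panic(err)
	}
	secret := corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "regcred"},
		Type:       corev1.SecretTypeDockerConfigJson, // "kubernetes.io/dockerconfigjson"
		Data: map[string][]byte{
			// The key must be ".dockerconfigjson" for this secret type.
			corev1.DockerConfigJsonKey: data,
		},
	}
	fmt.Println(secret.Name, secret.Type)
}

Pods then reference the secret through imagePullSecrets, as in the manifest change at the top of this commit.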


@@ -20,6 +20,7 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes")
 var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
 var hardened = flag.Bool("hardened", false, "true or false")
 var ci = flag.Bool("ci", false, "running on CI")
+var local = flag.Bool("local", false, "deploy a locally built K3s binary")
 
 // Environment Variables Info:
 // E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd)
@@ -41,7 +42,11 @@ var _ = Describe("Verify Create", Ordered, func() {
 	Context("Cluster :", func() {
 		It("Starts up with no issues", func() {
 			var err error
+			if *local {
+				serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
+			} else {
 				serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
+			}
 			Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog())
 			fmt.Println("CLUSTER CONFIG")
 			fmt.Println("OS:", *nodeOS)
@@ -79,8 +84,8 @@ var _ = Describe("Verify Create", Ordered, func() {
 		})
 		It("Verifies ClusterIP Service", func() {
-			_, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
-			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
+			res, err := e2e.DeployWorkload("clusterip.yaml", kubeConfigFile, *hardened)
+			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed: "+res)
 			Eventually(func(g Gomega) {
 				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
@@ -209,8 +214,8 @@ var _ = Describe("Verify Create", Ordered, func() {
 		})
 		It("Verifies Local Path Provisioner storage ", func() {
-			_, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
-			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
+			res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
+			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res)
 			Eventually(func(g Gomega) {
 				cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
@@ -233,7 +238,7 @@ var _ = Describe("Verify Create", Ordered, func() {
 			Expect(err).NotTo(HaveOccurred())
 			cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
-			res, err := e2e.RunCommand(cmd)
+			res, err = e2e.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
 			_, err = e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
@@ -267,7 +272,7 @@ var _ = Describe("Verify Create", Ordered, func() {
 var failed = false
 var _ = AfterEach(func() {
-	failed = failed || CurrentGinkgoTestDescription().Failed
+	failed = failed || CurrentSpecReport().Failed()
 })
 
 var _ = AfterSuite(func() {