Mirror of https://github.com/k3s-io/k3s.git (synced 2024-06-07 19:41:36 +00:00)

Put terraform tests into packages and cleanup upgrade test

Signed-off-by: rancher-max <max.ross@suse.com>

parent 989f3b34fe
commit ca15e0c5e5
@@ -22,9 +22,11 @@ go test -timeout=60m ./tests/terraform/... -run TF
 ```
 Tests can be run individually with:
 ```bash
-go test -timeout=30m ./tests/terraform/createcluster.go ./tests/terraform/createcluster_test.go ./tests/terraform/testutils.go
+go test -timeout=30m ./tests/terraform/createcluster/createcluster.go ./tests/terraform/createcluster/createcluster_test.go
 # OR
 go test -v -timeout=30m ./tests/terraform/... -run TFClusterCreateValidation
 # example with vars:
-go test -timeout=30m -v ./tests/terraform/createcluster.go ./tests/terraform/createcluster_test.go ./tests/terraform/testutils.go -node_os=ubuntu -aws_ami=ami-02f3416038bdb17fb -cluster_type=etcd -resource_name=localrun1 -sshuser=ubuntu -sshkey="key-name" -destroy=false
+go test -timeout=30m -v ./tests/terraform/createcluster.go ./tests/terraform/createcluster_test.go -node_os=ubuntu -aws_ami=ami-02f3416038bdb17fb -cluster_type=etcd -resource_name=localrun1 -sshuser=ubuntu -sshkey="key-name" -destroy=false
 ```
 
 In between tests, if the cluster is not destroyed, then make sure to delete the ./tests/terraform/terraform.tfstate file if you want to create a new cluster.
@@ -40,4 +42,4 @@ Note: The `go test` default timeout is 10 minutes, thus the `-timeout` flag shou
 
 # Debugging
 The cluster and VMs can be retained after a test by passing `-destroy=false`.
-To focus individual runs on specific test clauses, you can prefix with `F`. For example, in the [create cluster test](../tests/terraform/createcluster_test.go), you can upate the initial creation to be: `FIt("Starts up with no issues", func() {` in order to focus the run on only that clause.
+To focus individual runs on specific test clauses, you can prefix with `F`. For example, in the [create cluster test](../tests/terraform/createcluster_test.go), you can update the initial creation to be: `FIt("Starts up with no issues", func() {` in order to focus the run on only that clause.
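For reference, focusing works because Ginkgo treats `FIt` as a focused spec: as soon as any focused spec exists, only focused specs run. A minimal self-contained sketch (the spec text mirrors the create cluster test; the file itself is hypothetical and not part of the commit):

```go
package createcluster_test // hypothetical file, for illustration only

import (
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

func TestFocusExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Focus Example Suite")
}

var _ = Describe("Test:", func() {
	Context("Build Cluster:", func() {
		// FIt marks this spec as focused; sibling It specs are skipped.
		FIt("Starts up with no issues", func() {
			Expect(true).To(BeTrue())
		})
		It("Checks Node and Pod Status", func() {
			// skipped while a focused spec exists
		})
	})
})
```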
tests/terraform/createcluster.go (deleted file, 74 lines)
@@ -1,74 +0,0 @@
-package terraform
-
-import (
-	"flag"
-	"fmt"
-
-	"path/filepath"
-	"testing"
-
-	"github.com/gruntwork-io/terratest/modules/terraform"
-)
-
-var destroy = flag.Bool("destroy", false, "a bool")
-var awsAmi = flag.String("aws_ami", "", "a valid ami string like ami-abcxyz123")
-var nodeOs = flag.String("node_os", "ubuntu", "a string")
-var externalDb = flag.String("external_db", "mysql", "a string")
-var arch = flag.String("arch", "amd64", "a string")
-var clusterType = flag.String("cluster_type", "etcd", "a string")
-var resourceName = flag.String("resource_name", "etcd", "a string")
-var sshuser = flag.String("sshuser", "ubuntu", "a string")
-var sshkey = flag.String("sshkey", "", "a string")
-var access_key = flag.String("access_key", "", "local path to the private sshkey")
-var tfVars = flag.String("tfvars", "./modules/k3scluster/config/local.tfvars", "custom .tfvars file")
-var serverNodes = flag.Int("no_of_server_nodes", 2, "count of server nodes")
-var workerNodes = flag.Int("no_of_worker_nodes", 1, "count of worker nodes")
-var failed = false
-
-var (
-	kubeConfigFile string
-	masterIPs      string
-	workerIPs      string
-)
-
-func BuildCluster(nodeOs, awsAmi string, clusterType, externalDb, resourceName string, t *testing.T, destroy bool, arch string) (string, error) {
-	tDir := "./modules/k3scluster"
-
-	tfDir, err := filepath.Abs(tDir)
-	if err != nil {
-		return "", err
-	}
-	varDir, err := filepath.Abs(*tfVars)
-	if err != nil {
-		return "", err
-	}
-	TerraformOptions := &terraform.Options{
-		TerraformDir: tfDir,
-		VarFiles:     []string{varDir},
-		Vars: map[string]interface{}{
-			"node_os":            nodeOs,
-			"aws_ami":            awsAmi,
-			"cluster_type":       clusterType,
-			"resource_name":      resourceName,
-			"external_db":        externalDb,
-			"aws_user":           *sshuser,
-			"key_name":           *sshkey,
-			"access_key":         *access_key,
-			"no_of_server_nodes": *serverNodes,
-			"no_of_worker_nodes": *workerNodes,
-		},
-	}
-
-	if destroy {
-		fmt.Printf("Cluster is being deleted")
-		terraform.Destroy(t, TerraformOptions)
-		return "cluster destroyed", err
-	}
-
-	fmt.Printf("Creating Cluster")
-	terraform.InitAndApply(t, TerraformOptions)
-	kubeConfigFile = "/tmp/" + terraform.Output(t, TerraformOptions, "kubeconfig") + "_kubeconfig"
-	masterIPs = terraform.Output(t, TerraformOptions, "master_ips")
-	workerIPs = terraform.Output(t, TerraformOptions, "worker_ips")
-	return "cluster created", err
-}
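For orientation, both the deleted helper and its replacement below drive terratest the same way: build a `terraform.Options`, `InitAndApply`, read outputs, `Destroy` on teardown. A minimal sketch of that lifecycle (module path and variable values are illustrative only, not part of the commit):

```go
package example

import (
	"testing"

	"github.com/gruntwork-io/terratest/modules/terraform"
)

// applyAndTearDown sketches the create/read/destroy cycle used by BuildCluster.
func applyAndTearDown(t *testing.T) {
	opts := &terraform.Options{
		TerraformDir: "./modules/k3scluster", // module dir, as in the helper above
		Vars: map[string]interface{}{
			"cluster_type": "etcd",
		},
	}
	defer terraform.Destroy(t, opts) // always clean up the provisioned resources

	terraform.InitAndApply(t, opts)                       // terraform init + apply
	kubeconfig := terraform.Output(t, opts, "kubeconfig") // read a module output
	t.Log(kubeconfig)
}
```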
tests/terraform/createcluster/createcluster.go (new file, 142 lines)
@@ -0,0 +1,142 @@
+package createcluster
+
+import (
+	"fmt"
+
+	"path/filepath"
+	"testing"
+
+	"github.com/gruntwork-io/terratest/modules/terraform"
+	tf "github.com/k3s-io/k3s/tests/terraform"
+)
+
+var (
+	KubeConfigFile string
+	MasterIPs      string
+	WorkerIPs      string
+)
+
+type options struct {
+	nodeOs       string
+	awsAmi       string
+	clusterType  string
+	resourceName string
+	externalDb   string
+	sshuser      string
+	sshkey       string
+	accessKey    string
+	serverNodes  int
+	workerNodes  int
+}
+
+func ClusterOptions(os ...ClusterOption) map[string]interface{} {
+	opts := options{}
+	for _, o := range os {
+		opts = o(opts)
+	}
+	return map[string]interface{}{
+		"node_os":            opts.nodeOs,
+		"aws_ami":            opts.awsAmi,
+		"cluster_type":       opts.clusterType,
+		"resource_name":      opts.resourceName,
+		"external_db":        opts.externalDb,
+		"aws_user":           opts.sshuser,
+		"key_name":           opts.sshkey,
+		"access_key":         opts.accessKey,
+		"no_of_server_nodes": opts.serverNodes,
+		"no_of_worker_nodes": opts.workerNodes,
+	}
+}
+
+func BuildCluster(t *testing.T, tfVarsPath string, destroy bool, terraformVars map[string]interface{}) (string, error) {
+	basepath := tf.GetBasepath()
+	tfDir, err := filepath.Abs(basepath + "/tests/terraform/modules/k3scluster")
+	if err != nil {
+		return "", err
+	}
+	varDir, err := filepath.Abs(basepath + tfVarsPath)
+	if err != nil {
+		return "", err
+	}
+	TerraformOptions := &terraform.Options{
+		TerraformDir: tfDir,
+		VarFiles:     []string{varDir},
+		Vars:         terraformVars,
+	}
+
+	if destroy {
+		fmt.Printf("Cluster is being deleted")
+		terraform.Destroy(t, TerraformOptions)
+		return "cluster destroyed", err
+	}
+
+	fmt.Printf("Creating Cluster")
+	terraform.InitAndApply(t, TerraformOptions)
+	KubeConfigFile = "/tmp/" + terraform.Output(t, TerraformOptions, "kubeconfig") + "_kubeconfig"
+	MasterIPs = terraform.Output(t, TerraformOptions, "master_ips")
+	WorkerIPs = terraform.Output(t, TerraformOptions, "worker_ips")
+	return "cluster created", err
+}
+
+type ClusterOption func(o options) options
+
+func NodeOs(n string) ClusterOption {
+	return func(o options) options {
+		o.nodeOs = n
+		return o
+	}
+}
+func AwsAmi(n string) ClusterOption {
+	return func(o options) options {
+		o.awsAmi = n
+		return o
+	}
+}
+func ClusterType(n string) ClusterOption {
+	return func(o options) options {
+		o.clusterType = n
+		return o
+	}
+}
+func ResourceName(n string) ClusterOption {
+	return func(o options) options {
+		o.resourceName = n
+		return o
+	}
+}
+func ExternalDb(n string) ClusterOption {
+	return func(o options) options {
+		o.externalDb = n
+		return o
+	}
+}
+func Sshuser(n string) ClusterOption {
+	return func(o options) options {
+		o.sshuser = n
+		return o
+	}
+}
+func Sshkey(n string) ClusterOption {
+	return func(o options) options {
+		o.sshkey = n
+		return o
+	}
+}
+func AccessKey(n string) ClusterOption {
+	return func(o options) options {
+		o.accessKey = n
+		return o
+	}
+}
+func ServerNodes(n int) ClusterOption {
+	return func(o options) options {
+		o.serverNodes = n
+		return o
+	}
+}
+func WorkerNodes(n int) ClusterOption {
+	return func(o options) options {
+		o.workerNodes = n
+		return o
+	}
+}
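The new package exposes cluster configuration through the functional options pattern: each exported option returns a closure that fills one field of a private `options` struct, and `ClusterOptions` folds them into the `Vars` map that `BuildCluster` hands to terratest. A minimal usage sketch (the values are placeholders; the import alias matches the test files below):

```go
package main

import (
	"fmt"

	cc "github.com/k3s-io/k3s/tests/terraform/createcluster"
)

func main() {
	// Each option mutates the private options struct; ClusterOptions folds
	// them into the Vars map consumed by terratest via BuildCluster.
	vars := cc.ClusterOptions(
		cc.NodeOs("ubuntu"),
		cc.ClusterType("etcd"),
		cc.ServerNodes(2),
		cc.WorkerNodes(1),
	)
	fmt.Println(vars["no_of_server_nodes"]) // 2
}
```

Unset options simply leave their field at the zero value, so callers only spell out what they care about.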
tests/terraform/createcluster/createcluster_test.go
@@ -1,4 +1,4 @@
-package terraform
+package createcluster
 
 import (
 	"flag"
@@ -6,10 +6,29 @@ import (
 	"strings"
 	"testing"
 
+	tf "github.com/k3s-io/k3s/tests/terraform"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
+var awsAmi = flag.String("aws_ami", "", "a valid ami string like ami-abcxyz123")
+var nodeOs = flag.String("node_os", "ubuntu", "a string")
+var externalDb = flag.String("external_db", "mysql", "a string")
+var arch = flag.String("arch", "amd64", "a string")
+var clusterType = flag.String("cluster_type", "etcd", "a string")
+var resourceName = flag.String("resource_name", "etcd", "a string")
+var sshuser = flag.String("sshuser", "ubuntu", "a string")
+var sshkey = flag.String("sshkey", "", "a string")
+var accessKey = flag.String("access_key", "", "local path to the private sshkey")
+var serverNodes = flag.Int("no_of_server_nodes", 2, "count of server nodes")
+var workerNodes = flag.Int("no_of_worker_nodes", 1, "count of worker nodes")
+
+var tfVars = flag.String("tfvars", "/tests/terraform/modules/k3scluster/config/local.tfvars", "custom .tfvars file from base project path")
+var destroy = flag.Bool("destroy", false, "a bool")
+
+var failed = false
+var terraformOptions map[string]interface{}
 
 func Test_TFClusterCreateValidation(t *testing.T) {
 	RegisterFailHandler(Fail)
 	flag.Parse()
@@ -17,10 +36,14 @@ func Test_TFClusterCreateValidation(t *testing.T) {
 	RunSpecs(t, "Create Cluster Test Suite")
 }
 
+var _ = BeforeSuite(func() {
+	terraformOptions = ClusterOptions(NodeOs(*nodeOs), AwsAmi(*awsAmi), ClusterType(*clusterType), ExternalDb(*externalDb), ResourceName(*resourceName), AccessKey(*accessKey), Sshuser(*sshuser), ServerNodes(*serverNodes), WorkerNodes(*workerNodes), Sshkey(*sshkey))
+})
+
 var _ = Describe("Test:", func() {
 	Context("Build Cluster:", func() {
 		It("Starts up with no issues", func() {
-			status, err := BuildCluster(*nodeOs, *awsAmi, *clusterType, *externalDb, *resourceName, &testing.T{}, false, *arch)
+			status, err := BuildCluster(&testing.T{}, *tfVars, false, terraformOptions)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(status).To(Equal("cluster created"))
 			defer GinkgoRecover()
@@ -30,19 +53,19 @@ var _ = Describe("Test:", func() {
 				fmt.Println("\nCLUSTER CONFIG:\nOS", *nodeOs, "\nBACKEND", *externalDb)
 			}
 			fmt.Printf("\nIPs:\n")
-			fmt.Println("Server Node IPS:", masterIPs)
-			fmt.Println("Agent Node IPS:", workerIPs)
-			fmt.Println(kubeConfigFile)
-			Expect(kubeConfigFile).Should(ContainSubstring(*resourceName))
+			fmt.Println("Server Node IPS:", MasterIPs)
+			fmt.Println("Agent Node IPS:", WorkerIPs)
+			fmt.Println(KubeConfigFile)
+			Expect(KubeConfigFile).Should(ContainSubstring(*resourceName))
 		})
 
 		It("Checks Node and Pod Status", func() {
 			defer func() {
-				_, err := ParseNodes(kubeConfigFile, true)
+				_, err := tf.ParseNodes(KubeConfigFile, true)
 				if err != nil {
 					fmt.Println("Error retrieving nodes: ", err)
 				}
-				_, err = ParsePods(kubeConfigFile, true)
+				_, err = tf.ParsePods(KubeConfigFile, true)
 				if err != nil {
 					fmt.Println("Error retrieving pods: ", err)
 				}
@@ -51,7 +74,7 @@ var _ = Describe("Test:", func() {
 			fmt.Printf("\nFetching node status\n")
 			expectedNodeCount := *serverNodes + *workerNodes + 1
 			Eventually(func(g Gomega) {
-				nodes, err := ParseNodes(kubeConfigFile, false)
+				nodes, err := tf.ParseNodes(KubeConfigFile, false)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec")
 				for _, node := range nodes {
@@ -61,7 +84,7 @@ var _ = Describe("Test:", func() {
 
 			fmt.Printf("\nFetching pod status\n")
 			Eventually(func(g Gomega) {
-				pods, err := ParsePods(kubeConfigFile, false)
+				pods, err := tf.ParsePods(KubeConfigFile, false)
 				g.Expect(err).NotTo(HaveOccurred())
 				for _, pod := range pods {
 					if strings.Contains(pod.Name, "helm-install") {
@@ -75,22 +98,22 @@ var _ = Describe("Test:", func() {
 		})
 
 		It("Verifies ClusterIP Service", func() {
-			_, err := DeployWorkload("clusterip.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("clusterip.yaml", KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should((ContainSubstring("test-clusterip")))
 			}, "420s", "5s").Should(Succeed())
 
-			clusterip, _ := FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
+			clusterip, _ := tf.FetchClusterIP(KubeConfigFile, "nginx-clusterip-svc")
 			cmd := "curl -sL --insecure http://" + clusterip + "/name.html"
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
+			nodeExternalIP := tf.FetchNodeExternalIP(KubeConfigFile)
 			for _, ip := range nodeExternalIP {
 				Eventually(func(g Gomega) {
-					res, err := RunCmdOnNode(cmd, ip, *sshuser, *access_key)
+					res, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-clusterip"))
 				}, "420s", "10s").Should(Succeed())
@@ -98,24 +121,24 @@ var _ = Describe("Test:", func() {
 		})
 
 		It("Verifies NodePort Service", func() {
-			_, err := DeployWorkload("nodeport.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("nodeport.yaml", KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
-			cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
-			nodeport, err := RunCommand(cmd)
+			nodeExternalIP := tf.FetchNodeExternalIP(KubeConfigFile)
+			cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
+			nodeport, err := tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 
 			for _, ip := range nodeExternalIP {
 				Eventually(func(g Gomega) {
-					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-					res, err := RunCommand(cmd)
+					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-nodeport"))
 				}, "240s", "5s").Should(Succeed())
 
 				cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html"
 				Eventually(func(g Gomega) {
-					res, err := RunCommand(cmd)
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-nodeport"))
 				}, "240s", "5s").Should(Succeed())
@@ -123,24 +146,24 @@ var _ = Describe("Test:", func() {
 		})
 
 		It("Verifies LoadBalancer Service", func() {
-			_, err := DeployWorkload("loadbalancer.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("loadbalancer.yaml", KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
-			cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
-			port, err := RunCommand(cmd)
+			nodeExternalIP := tf.FetchNodeExternalIP(KubeConfigFile)
+			cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
+			port, err := tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 			for _, ip := range nodeExternalIP {
 
 				Eventually(func(g Gomega) {
-					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-					res, err := RunCommand(cmd)
+					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
 				}, "240s", "5s").Should(Succeed())
 
 				Eventually(func(g Gomega) {
 					cmd = "curl -sL --insecure http://" + ip + ":" + port + "/name.html"
-					res, err := RunCommand(cmd)
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
 				}, "240s", "5s").Should(Succeed())
@@ -148,23 +171,23 @@ var _ = Describe("Test:", func() {
 		})
 
 		It("Verifies Ingress", func() {
-			_, err := DeployWorkload("ingress.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("ingress.yaml", KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("test-ingress"))
 			}, "240s", "5s").Should(Succeed())
 
-			ingressIps, err := FetchIngressIP(kubeConfigFile)
+			ingressIps, err := tf.FetchIngressIP(KubeConfigFile)
 			Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned")
 
 			for _, ip := range ingressIps {
 				cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html"
 				Eventually(func(g Gomega) {
-					res, err := RunCommand(cmd)
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-ingress"))
 				}, "240s", "5s").Should(Succeed())
@@ -172,86 +195,86 @@ var _ = Describe("Test:", func() {
 		})
 
 		It("Verifies Daemonset", func() {
-			_, err := DeployWorkload("daemonset.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("daemonset.yaml", KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
 
-			nodes, _ := ParseNodes(kubeConfigFile, false)
-			pods, _ := ParsePods(kubeConfigFile, false)
+			nodes, _ := tf.ParseNodes(KubeConfigFile, false)
+			pods, _ := tf.ParsePods(KubeConfigFile, false)
 
 			Eventually(func(g Gomega) {
-				count := CountOfStringInSlice("test-daemonset", pods)
+				count := tf.CountOfStringInSlice("test-daemonset", pods)
 				g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
 			}, "420s", "10s").Should(Succeed())
 		})
 
 		It("Verifies Local Path Provisioner storage ", func() {
-			_, err := DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("local-path-provisioner.yaml", KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("local-path-pvc"))
 				g.Expect(res).Should(ContainSubstring("Bound"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pod volume-test --kubeconfig=" + KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("volume-test"))
 				g.Expect(res).Should(ContainSubstring("Running"))
 			}, "420s", "2s").Should(Succeed())
 
-			cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
-			_, err = RunCommand(cmd)
+			cmd := "kubectl --kubeconfig=" + KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
+			_, err = tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 
-			cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
-			_, err = RunCommand(cmd)
+			cmd = "kubectl delete pod volume-test --kubeconfig=" + KubeConfigFile
+			_, err = tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 
-			_, err = DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *arch)
+			_, err = tf.DeployWorkload("local-path-provisioner.yaml", KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("pod/local-path-provisioner"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pod volume-test --kubeconfig=" + KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("volume-test"))
 				g.Expect(res).Should(ContainSubstring("Running"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd = "kubectl exec volume-test --kubeconfig=" + kubeConfigFile + " -- cat /data/test"
-				res, err = RunCommand(cmd)
+				cmd := "kubectl exec volume-test --kubeconfig=" + KubeConfigFile + " -- cat /data/test"
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("local-path-test"))
 			}, "180s", "2s").Should(Succeed())
 		})
 
 		It("Verifies dns access", func() {
-			_, err := DeployWorkload("dnsutils.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("dnsutils.yaml", KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
-				res, _ := RunCommand(cmd)
+				cmd := "kubectl get pods dnsutils --kubeconfig=" + KubeConfigFile
+				res, _ := tf.RunCommand(cmd)
 				g.Expect(res).Should(ContainSubstring("dnsutils"))
 				g.Expect(res).Should(ContainSubstring("Running"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
-				res, _ := RunCommand(cmd)
+				cmd := "kubectl --kubeconfig=" + KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
+				res, _ := tf.RunCommand(cmd)
 				g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
 			}, "420s", "2s").Should(Succeed())
 		})
@@ -265,19 +288,19 @@ var _ = AfterEach(func() {
 var _ = BeforeEach(func() {
 	failed = failed || CurrentSpecReport().Failed()
 	if *destroy {
 		fmt.Printf("\nCluster is being Deleted\n")
 		Skip("Cluster is being Deleted")
 	}
 
 })
 
 var _ = AfterSuite(func() {
 	if failed {
 		fmt.Println("FAILED!")
-	} else if *destroy {
-		status, err := BuildCluster(*nodeOs, *awsAmi, *clusterType, *externalDb, *resourceName, &testing.T{}, *destroy, *arch)
-		Expect(err).NotTo(HaveOccurred())
-		Expect(status).To(Equal("cluster destroyed"))
 	} else {
 		fmt.Println("PASSED!")
 	}
+	if *destroy {
+		status, err := BuildCluster(&testing.T{}, *tfVars, *destroy, terraformOptions)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(status).To(Equal("cluster destroyed"))
+	}
 })
tests/terraform/testutils.go
@@ -7,6 +7,7 @@ import (
 	"log"
 	"os/exec"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"time"
 
@@ -37,6 +38,11 @@ var SSHKEY string
 var SSHUSER string
 var err error
 
+func GetBasepath() string {
+	_, b, _, _ := runtime.Caller(0)
+	return filepath.Join(filepath.Dir(b), "../..")
+}
+
 func checkError(e error) {
 	if e != nil {
 		log.Fatal(err)
@@ -118,9 +124,9 @@ func CountOfStringInSlice(str string, pods []Pod) int {
 }
 
 func DeployWorkload(workload, kubeconfig string, arch string) (string, error) {
-	resourceDir := "./amd64_resource_files"
+	resourceDir := GetBasepath() + "/tests/terraform/amd64_resource_files"
 	if arch == "arm64" {
-		resourceDir = "./arm_resource_files"
+		resourceDir = GetBasepath() + "/tests/terraform/arm_resource_files"
 	}
 	files, err := ioutil.ReadDir(resourceDir)
 	if err != nil {
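`GetBasepath` is what lets the packaged tests find module and resource files regardless of the working directory: `runtime.Caller(0)` reports the path of the source file itself, so joining `../..` walks from `tests/terraform/` up to the repository root. A standalone sketch of the same idiom (hypothetical package, not part of the commit):

```go
package main

import (
	"fmt"
	"path/filepath"
	"runtime"
)

// basepath resolves paths relative to this source file rather than the
// process working directory, mirroring GetBasepath above.
func basepath() string {
	_, file, _, _ := runtime.Caller(0) // absolute path of this .go file, recorded at build time
	return filepath.Join(filepath.Dir(file), "../..")
}

func main() {
	fmt.Println(basepath()) // two directories above wherever this file lives
}
```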
tests/terraform/upgradecluster/upgradecluster_test.go
@@ -1,4 +1,4 @@
-package terraform
+package upgradecluster
 
 import (
 	"flag"
@@ -6,11 +6,30 @@ import (
 	"strings"
 	"testing"
 
+	tf "github.com/k3s-io/k3s/tests/terraform"
+	cc "github.com/k3s-io/k3s/tests/terraform/createcluster"
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 )
 
 var upgradeVersion = flag.String("upgrade_version", "", "a string")
+var awsAmi = flag.String("aws_ami", "", "a valid ami string like ami-abcxyz123")
+var nodeOs = flag.String("node_os", "ubuntu", "a string")
+var externalDb = flag.String("external_db", "mysql", "a string")
+var arch = flag.String("arch", "amd64", "a string")
+var clusterType = flag.String("cluster_type", "etcd", "a string")
+var resourceName = flag.String("resource_name", "etcd", "a string")
+var sshuser = flag.String("sshuser", "ubuntu", "a string")
+var sshkey = flag.String("sshkey", "", "a string")
+var accessKey = flag.String("access_key", "", "local path to the private sshkey")
+var serverNodes = flag.Int("no_of_server_nodes", 2, "count of server nodes")
+var workerNodes = flag.Int("no_of_worker_nodes", 1, "count of worker nodes")
+
+var tfVars = flag.String("tfvars", "/tests/terraform/modules/k3scluster/config/local.tfvars", "custom .tfvars file from base project path")
+var destroy = flag.Bool("destroy", false, "a bool")
+
+var failed = false
+var terraformOptions map[string]interface{}
 
 func Test_TFClusterUpgradeValidation(t *testing.T) {
 	RegisterFailHandler(Fail)
@@ -18,146 +37,158 @@ func Test_TFClusterUpgradeValidation(t *testing.T) {
 	RunSpecs(t, "Upgrade Cluster Test Suite")
 }
 
+var _ = BeforeSuite(func() {
+	terraformOptions = cc.ClusterOptions(cc.NodeOs(*nodeOs), cc.AwsAmi(*awsAmi), cc.ClusterType(*clusterType), cc.ExternalDb(*externalDb), cc.ResourceName(*resourceName), cc.AccessKey(*accessKey), cc.Sshuser(*sshuser), cc.ServerNodes(*serverNodes), cc.WorkerNodes(*workerNodes), cc.Sshkey(*sshkey))
+})
+
 var _ = Describe("Test:", func() {
 	Context("Build Cluster:", func() {
 		It("Starts up with no issues", func() {
-			status, err := BuildCluster(*nodeOs, *awsAmi, *clusterType, *externalDb, *resourceName, &testing.T{}, *destroy, *arch)
+			status, err := cc.BuildCluster(&testing.T{}, *tfVars, false, terraformOptions)
 			Expect(err).NotTo(HaveOccurred())
 			Expect(status).To(Equal("cluster created"))
 			defer GinkgoRecover()
-			fmt.Println("\nCLUSTER CONFIG:\nOS", *nodeOs, "BACKEND", *clusterType, *externalDb)
+			if strings.Contains(*clusterType, "etcd") {
+				fmt.Println("\nCLUSTER CONFIG:\nOS", *nodeOs, "\nBACKEND", *clusterType)
+			} else {
+				fmt.Println("\nCLUSTER CONFIG:\nOS", *nodeOs, "\nBACKEND", *externalDb)
+			}
 			fmt.Printf("\nIPs:\n")
-			fmt.Println("Server Node IPS:", masterIPs)
-			fmt.Println("Agent Node IPS:", workerIPs)
-			fmt.Println(kubeConfigFile)
-			Expect(kubeConfigFile).Should(ContainSubstring(*resourceName))
+			fmt.Println("Server Node IPS:", cc.MasterIPs)
+			fmt.Println("Agent Node IPS:", cc.WorkerIPs)
+			fmt.Println(cc.KubeConfigFile)
+			Expect(cc.KubeConfigFile).Should(ContainSubstring(*resourceName))
 		})
 
 		It("Checks Node and Pod Status", func() {
+			defer func() {
+				_, err := tf.ParseNodes(cc.KubeConfigFile, true)
+				if err != nil {
+					fmt.Println("Error retrieving nodes: ", err)
+				}
+				_, err = tf.ParsePods(cc.KubeConfigFile, true)
+				if err != nil {
+					fmt.Println("Error retrieving pods: ", err)
+				}
+			}()
+
 			fmt.Printf("\nFetching node status\n")
+			expectedNodeCount := *serverNodes + *workerNodes + 1
 			Eventually(func(g Gomega) {
-				nodes, err := ParseNodes(kubeConfigFile, false)
+				nodes, err := tf.ParseNodes(cc.KubeConfigFile, false)
 				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec")
 				for _, node := range nodes {
-					g.Expect(node.Status).Should(Equal("Ready"))
+					g.Expect(node.Status).Should(Equal("Ready"), "Nodes should all be in Ready state")
 				}
 			}, "420s", "5s").Should(Succeed())
-			_, _ = ParseNodes(kubeConfigFile, true)
 
-			fmt.Printf("\nFetching Pods status\n")
+			fmt.Printf("\nFetching pod status\n")
 			Eventually(func(g Gomega) {
-				pods, err := ParsePods(kubeConfigFile, false)
+				pods, err := tf.ParsePods(cc.KubeConfigFile, false)
 				g.Expect(err).NotTo(HaveOccurred())
 				for _, pod := range pods {
 					if strings.Contains(pod.Name, "helm-install") {
 						g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
 					} else {
 						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
 						g.Expect(pod.Restarts).Should(Equal("0"), pod.Name)
 					}
 				}
-			}, "420s", "5s").Should(Succeed())
-			_, _ = ParsePods(kubeConfigFile, true)
+			}, "600s", "5s").Should(Succeed())
 		})
 
 		It("Verifies ClusterIP Service", func() {
-			_, err := DeployWorkload("clusterip.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("clusterip.yaml", cc.KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should((ContainSubstring("test-clusterip")))
 			}, "420s", "5s").Should(Succeed())
 
-			clusterip, _ := FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
-			cmd := "curl -L --insecure http://" + clusterip + "/name.html"
-			fmt.Println(cmd)
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
+			clusterip, _ := tf.FetchClusterIP(cc.KubeConfigFile, "nginx-clusterip-svc")
+			cmd := "curl -sL --insecure http://" + clusterip + "/name.html"
+			nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
 			for _, ip := range nodeExternalIP {
 				Eventually(func(g Gomega) {
-					res, err := RunCmdOnNode(cmd, ip, *sshuser, *sshkey)
+					res, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-clusterip"))
 
 				}, "420s", "10s").Should(Succeed())
 			}
 		})
 
 		It("Verifies NodePort Service", func() {
-			_, err := DeployWorkload("nodeport.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("nodeport.yaml", cc.KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
-			cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
-			nodeport, err := RunCommand(cmd)
+			nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
+			cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + cc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
+			nodeport, err := tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 
 			for _, ip := range nodeExternalIP {
 				Eventually(func(g Gomega) {
-					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-					res, err := RunCommand(cmd)
+					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-nodeport"))
 				}, "240s", "5s").Should(Succeed())
 
-				cmd = "curl -L --insecure http://" + ip + ":" + nodeport + "/name.html"
-				fmt.Println(cmd)
+				cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html"
 				Eventually(func(g Gomega) {
-					res, err := RunCommand(cmd)
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
-					fmt.Println(res)
 					g.Expect(res).Should(ContainSubstring("test-nodeport"))
 				}, "240s", "5s").Should(Succeed())
 			}
 		})
 
 		It("Verifies LoadBalancer Service", func() {
-			_, err := DeployWorkload("loadbalancer.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("loadbalancer.yaml", cc.KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
-			cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
-			port, err := RunCommand(cmd)
+			nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
+			cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + cc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
+			port, err := tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 			for _, ip := range nodeExternalIP {
 
 				Eventually(func(g Gomega) {
-					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-					res, err := RunCommand(cmd)
+					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
 				}, "240s", "5s").Should(Succeed())
 
 				Eventually(func(g Gomega) {
-					cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
-					fmt.Println(cmd)
-					res, err := RunCommand(cmd)
+					cmd = "curl -sL --insecure http://" + ip + ":" + port + "/name.html"
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
-					fmt.Println(res)
 					g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
 				}, "240s", "5s").Should(Succeed())
 			}
 		})
 
 		It("Verifies Ingress", func() {
-			_, err := DeployWorkload("ingress.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("ingress.yaml", cc.KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("test-ingress"))
 			}, "240s", "5s").Should(Succeed())
 
-			ingressIps, err := FetchIngressIP(kubeConfigFile)
+			ingressIps, err := tf.FetchIngressIP(cc.KubeConfigFile)
 			Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned")
 
 			for _, ip := range ingressIps {
-				cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
-				fmt.Println(cmd)
+				cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html"
 				Eventually(func(g Gomega) {
-					res, err := RunCommand(cmd)
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-ingress"))
 				}, "240s", "5s").Should(Succeed())
@@ -165,165 +196,152 @@ var _ = Describe("Test:", func() {
 		})
 
 		It("Verifies Daemonset", func() {
-			_, err := DeployWorkload("daemonset.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("daemonset.yaml", cc.KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
 
-			nodes, _ := ParseNodes(kubeConfigFile, false)
-			pods, _ := ParsePods(kubeConfigFile, false)
+			nodes, _ := tf.ParseNodes(cc.KubeConfigFile, false)
+			pods, _ := tf.ParsePods(cc.KubeConfigFile, false)
 
 			Eventually(func(g Gomega) {
-				count := CountOfStringInSlice("test-daemonset", pods)
-				fmt.Println("POD COUNT")
-				fmt.Println(count)
-				fmt.Println("NODE COUNT")
-				fmt.Println(len(nodes))
+				count := tf.CountOfStringInSlice("test-daemonset", pods)
 				g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
 			}, "420s", "10s").Should(Succeed())
 		})
 
 		It("Verifies Local Path Provisioner storage ", func() {
-			_, err := DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("local-path-provisioner.yaml", cc.KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
-				fmt.Println(res)
 				g.Expect(res).Should(ContainSubstring("local-path-pvc"))
 				g.Expect(res).Should(ContainSubstring("Bound"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pod volume-test --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				Expect(err).NotTo(HaveOccurred())
-				fmt.Println(res)
 				g.Expect(res).Should(ContainSubstring("volume-test"))
 				g.Expect(res).Should(ContainSubstring("Running"))
 			}, "420s", "2s").Should(Succeed())
 
-			cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
-			_, err = RunCommand(cmd)
+			cmd := "kubectl --kubeconfig=" + cc.KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
+			_, err = tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
-			fmt.Println("Data stored in pvc: local-path-test")
 
-			cmd = "kubectl delete pod volume-test --kubeconfig=" + kubeConfigFile
-			res, err := RunCommand(cmd)
+			cmd = "kubectl delete pod volume-test --kubeconfig=" + cc.KubeConfigFile
+			_, err = tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
-			fmt.Println(res)
-			_, err = DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *arch)
+
+			_, err = tf.DeployWorkload("local-path-provisioner.yaml", cc.KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
-				res, _ := RunCommand(cmd)
-				fmt.Println(res)
+				cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
+				Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("pod/local-path-provisioner"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pod volume-test --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
-				fmt.Println(res)
 				g.Expect(res).Should(ContainSubstring("volume-test"))
 				g.Expect(res).Should(ContainSubstring("Running"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd = "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
-				res, err = RunCommand(cmd)
+				cmd = "kubectl exec volume-test --kubeconfig=" + cc.KubeConfigFile + " -- cat /data/test"
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
-				fmt.Println("Data after re-creation", res)
 				g.Expect(res).Should(ContainSubstring("local-path-test"))
 			}, "180s", "2s").Should(Succeed())
 		})
 
 		It("Verifies dns access", func() {
-			_, err := DeployWorkload("dnsutils.yaml", kubeConfigFile, *arch)
+			_, err := tf.DeployWorkload("dnsutils.yaml", cc.KubeConfigFile, *arch)
 			Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
-				res, _ := RunCommand(cmd)
-				fmt.Println(res)
+				cmd := "kubectl get pods dnsutils --kubeconfig=" + cc.KubeConfigFile
+				res, _ := tf.RunCommand(cmd)
 				g.Expect(res).Should(ContainSubstring("dnsutils"))
 				g.Expect(res).Should(ContainSubstring("Running"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
-				res, _ := RunCommand(cmd)
-				fmt.Println(res)
+				cmd := "kubectl --kubeconfig=" + cc.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
+				res, _ := tf.RunCommand(cmd)
 				g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
 
 			}, "420s", "2s").Should(Succeed())
 		})
 
-		It("\nVerify Cluster is upgraded", func() {
-			if *destroy {
-				//fmt.Printf("\nCluster is Deleted\n")
-				return
-			}
-			MIPs := strings.Split(masterIPs, ",")
-			fmt.Println(MIPs)
+		It("Verify Cluster is upgraded", func() {
+			MIPs := strings.Split(cc.MasterIPs, ",")
 			for _, ip := range MIPs {
-				fmt.Println(ip)
 
 				cmd := "sudo sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/master_cmd"
-				fmt.Println(cmd)
 				Eventually(func(g Gomega) {
-					result, err := RunCmdOnNode(cmd, ip, *sshuser, *sshkey)
-					fmt.Println(result)
+					_, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
 					g.Expect(err).NotTo(HaveOccurred())
 				}, "420s", "2s").Should(Succeed())
 
 				cmd = "sudo chmod u+x /tmp/master_cmd && sudo /tmp/master_cmd"
-				fmt.Println(cmd)
 				Eventually(func(g Gomega) {
-					_, err := RunCmdOnNode(cmd, ip, *sshuser, *sshkey)
+					_, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
 					g.Expect(err).NotTo(HaveOccurred())
 				}, "420s", "2s").Should(Succeed())
 			}
 
-			WIPs := strings.Split(workerIPs, ",")
-			for i := 0; i < len(WIPs) && len(WIPs[0]) > 1; i++ {
-				ip := WIPs[i]
-				strings.TrimSpace(WIPs[i])
+			WIPs := strings.Split(cc.WorkerIPs, ",")
+			for _, ip := range WIPs {
+				// for i := 0; i < len(WIPs) && len(WIPs[0]) > 1; i++ {
+				// 	ip := WIPs[i]
+				// 	strings.TrimSpace(WIPs[i])
 				cmd := "sudo sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/agent_cmd"
-				fmt.Println(cmd)
-				fmt.Println(*sshuser)
-				fmt.Println(*sshkey)
 				Eventually(func(g Gomega) {
-					_, err := RunCmdOnNode(cmd, ip, *sshuser, *sshkey)
+					_, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
 					g.Expect(err).NotTo(HaveOccurred())
 				}, "420s", "2s").Should(Succeed())
 				cmd = "sudo chmod u+x /tmp/agent_cmd && sudo /tmp/agent_cmd"
-				fmt.Println(cmd)
 				Eventually(func(g Gomega) {
-					_, err := RunCmdOnNode(cmd, ip, *sshuser, *sshkey)
+					_, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
 					g.Expect(err).NotTo(HaveOccurred())
 				}, "420s", "2s").Should(Succeed())
 			}
 		})
 
 		It("Checks Node and Pod Status after upgrade", func() {
-			fmt.Printf("\nFetching node status after upgrade\n")
-			Eventually(func(g Gomega) {
-				nodes, err := ParseNodes(kubeConfigFile, false)
-				g.Expect(err).NotTo(HaveOccurred())
-				for _, node := range nodes {
-					g.Expect(node.Status).Should(Equal("Ready"))
-					g.Expect(node.Version).Should(ContainSubstring(*upgradeVersion))
+			defer func() {
+				_, err := tf.ParseNodes(cc.KubeConfigFile, true)
+				if err != nil {
+					fmt.Println("Error retrieving nodes: ", err)
+				}
+				_, err = tf.ParsePods(cc.KubeConfigFile, true)
+				if err != nil {
+					fmt.Println("Error retrieving pods: ", err)
+				}
+			}()
+
+			fmt.Printf("\nFetching node status\n")
+			expectedNodeCount := *serverNodes + *workerNodes + 1
+			Eventually(func(g Gomega) {
+				nodes, err := tf.ParseNodes(cc.KubeConfigFile, false)
+				g.Expect(err).NotTo(HaveOccurred())
+				g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec")
+				for _, node := range nodes {
+					g.Expect(node.Status).Should(Equal("Ready"), "Nodes should all be in Ready state")
+					g.Expect(node.Version).Should(ContainSubstring(*upgradeVersion))
 				}
 			}, "420s", "5s").Should(Succeed())
-			_, _ = ParseNodes(kubeConfigFile, true)
 
-			fmt.Printf("\nFetching Pods status\n")
+			fmt.Printf("\nFetching pod status\n")
 			Eventually(func(g Gomega) {
-				pods, err := ParsePods(kubeConfigFile, false)
+				pods, err := tf.ParsePods(cc.KubeConfigFile, false)
 				g.Expect(err).NotTo(HaveOccurred())
 				for _, pod := range pods {
 					if strings.Contains(pod.Name, "helm-install") {
@@ -332,186 +350,176 @@ var _ = Describe("Test:", func() {
 						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
 					}
 				}
-			}, "420s", "5s").Should(Succeed())
-			_, _ = ParsePods(kubeConfigFile, true)
+			}, "600s", "5s").Should(Succeed())
 		})
 
-		It("Verifies ClusterIP Service", func() {
+		It("Verifies ClusterIP Service after upgrade", func() {
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should((ContainSubstring("test-clusterip")))
 			}, "420s", "5s").Should(Succeed())
 
-			clusterip, _ := FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc")
-			cmd := "curl -L --insecure http://" + clusterip + "/name.html"
-			fmt.Println(cmd)
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
+			clusterip, _ := tf.FetchClusterIP(cc.KubeConfigFile, "nginx-clusterip-svc")
+			cmd := "curl -sL --insecure http://" + clusterip + "/name.html"
+			nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
 			for _, ip := range nodeExternalIP {
 				Eventually(func(g Gomega) {
-					res, err := RunCmdOnNode(cmd, ip, *sshuser, *sshkey)
+					res, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-clusterip"))
 
 				}, "420s", "10s").Should(Succeed())
 			}
 		})
-		It("Verifies NodePort Service", func() {
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
-			cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
-			nodeport, err := RunCommand(cmd)
+
+		It("Verifies NodePort Service after upgrade", func() {
+			nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
+			cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + cc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
+			nodeport, err := tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 
 			for _, ip := range nodeExternalIP {
 				Eventually(func(g Gomega) {
-					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-					res, err := RunCommand(cmd)
+					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-nodeport"))
 				}, "240s", "5s").Should(Succeed())
 
 				cmd = "curl -L --insecure http://" + ip + ":" + nodeport + "/name.html"
-				fmt.Println(cmd)
 				Eventually(func(g Gomega) {
-					res, err := RunCommand(cmd)
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
-					fmt.Println(res)
 					g.Expect(res).Should(ContainSubstring("test-nodeport"))
 				}, "240s", "5s").Should(Succeed())
 			}
 		})
-		It("Verifies LoadBalancer Service", func() {
-			nodeExternalIP := FetchNodeExternalIP(kubeConfigFile)
-			cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + kubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
-			port, err := RunCommand(cmd)
+
+		It("Verifies LoadBalancer Service after upgrade", func() {
+			nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
+			cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + cc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
+			port, err := tf.RunCommand(cmd)
 			Expect(err).NotTo(HaveOccurred())
 			for _, ip := range nodeExternalIP {
 
 				Eventually(func(g Gomega) {
-					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-					res, err := RunCommand(cmd)
+					cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
 				}, "240s", "5s").Should(Succeed())
 
 				Eventually(func(g Gomega) {
-					cmd = "curl -L --insecure http://" + ip + ":" + port + "/name.html"
-					fmt.Println(cmd)
-					res, err := RunCommand(cmd)
+					cmd = "curl -sL --insecure http://" + ip + ":" + port + "/name.html"
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
-					fmt.Println(res)
 					g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
 				}, "240s", "5s").Should(Succeed())
 			}
 		})
-		It("Verifies Ingress", func() {
+
+		It("Verifies Ingress after upgrade", func() {
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("test-ingress"))
 			}, "240s", "5s").Should(Succeed())
 
-			ingressIps, err := FetchIngressIP(kubeConfigFile)
+			ingressIps, err := tf.FetchIngressIP(cc.KubeConfigFile)
 			Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned")
 
 			for _, ip := range ingressIps {
-				cmd := "curl --header host:foo1.bar.com" + " http://" + ip + "/name.html"
-				fmt.Println(cmd)
+				cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html"
 				Eventually(func(g Gomega) {
-					res, err := RunCommand(cmd)
+					res, err := tf.RunCommand(cmd)
 					g.Expect(err).NotTo(HaveOccurred())
 					g.Expect(res).Should(ContainSubstring("test-ingress"))
 				}, "240s", "5s").Should(Succeed())
 			}
 		})
-		It("Verifies Daemonset", func() {
-			_, err := DeployWorkload("daemonset.yaml", kubeConfigFile, *arch)
-
-			Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
-
-			nodes, _ := ParseNodes(kubeConfigFile, false)
-			pods, _ := ParsePods(kubeConfigFile, false)
+
+		It("Verifies Daemonset after upgrade", func() {
+			nodes, _ := tf.ParseNodes(cc.KubeConfigFile, false)
+			pods, _ := tf.ParsePods(cc.KubeConfigFile, false)
 
 			Eventually(func(g Gomega) {
-				count := CountOfStringInSlice("test-daemonset", pods)
-				fmt.Println("POD COUNT")
-				fmt.Println(count)
-				fmt.Println("NODE COUNT")
-				fmt.Println(len(nodes))
+				count := tf.CountOfStringInSlice("test-daemonset", pods)
 				g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
 			}, "420s", "10s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
 				for _, pod := range pods {
 					if strings.Contains(pod.Name, "test-daemonset") {
 						g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
 					}
 				}
 			}, "240s", "5s").Should(Succeed())
 		})
 
 		It("Validating Local Path Provisioner storage after upgrade", func() {
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + kubeConfigFile
-				res, _ := RunCommand(cmd)
-				fmt.Println(res)
+				cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
+				Expect(err).NotTo(HaveOccurred())
 				g.Expect(res).Should(ContainSubstring("pod/local-path-provisioner"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pod volume-test --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl get pod volume-test --kubeconfig=" + cc.KubeConfigFile
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
-				fmt.Println(res)
 				g.Expect(res).Should(ContainSubstring("volume-test"))
 				g.Expect(res).Should(ContainSubstring("Running"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl exec volume-test cat /data/test --kubeconfig=" + kubeConfigFile
-				res, err := RunCommand(cmd)
+				cmd := "kubectl exec volume-test --kubeconfig=" + cc.KubeConfigFile + " -- cat /data/test"
+				res, err := tf.RunCommand(cmd)
 				g.Expect(err).NotTo(HaveOccurred())
-				fmt.Println("Data after upgrade", res)
 				g.Expect(res).Should(ContainSubstring("local-path-test"))
 			}, "180s", "2s").Should(Succeed())
 		})
-		It("Verifies dns access", func() {
+
+		It("Verifies dns access after upgrade", func() {
 			Eventually(func(g Gomega) {
-				cmd := "kubectl get pods dnsutils --kubeconfig=" + kubeConfigFile
-				res, _ := RunCommand(cmd)
-				fmt.Println(res)
+				cmd := "kubectl get pods dnsutils --kubeconfig=" + cc.KubeConfigFile
+				res, _ := tf.RunCommand(cmd)
 				g.Expect(res).Should(ContainSubstring("dnsutils"))
 				g.Expect(res).Should(ContainSubstring("Running"))
 			}, "420s", "2s").Should(Succeed())
 
 			Eventually(func(g Gomega) {
-				cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
-				res, _ := RunCommand(cmd)
-				fmt.Println(res)
+				cmd := "kubectl --kubeconfig=" + cc.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
+				res, _ := tf.RunCommand(cmd)
 				g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
 
 			}, "420s", "2s").Should(Succeed())
 		})
 	})
 })
 
 var _ = AfterEach(func() {
-	failed = failed || CurrentGinkgoTestDescription().Failed
+	failed = failed || CurrentSpecReport().Failed()
 })
 
 var _ = BeforeEach(func() {
-	failed = failed || CurrentGinkgoTestDescription().Failed
+	failed = failed || CurrentSpecReport().Failed()
 	if *destroy {
 		fmt.Printf("\nCluster is being Deleted\n")
 		Skip("Cluster is being Deleted")
 	}
 
 })
 
-// var _ = AfterSuite(func() {
-// 	if failed {
-// 		fmt.Println("FAILED!")
-// 	} else {
-// 		kubeConfigFile, masterIPs, workerIPs, err = BuildCluster(*nodeOs, *awsAmi, *clusterType, *externalDb, *resourceName, &testing.T{}, *destroy, *arch)
-// 		if err != nil {
-// 			fmt.Println("Error Destroying Cluster", err)
-// 		}
-// 	}
-// })
+var _ = AfterSuite(func() {
+	if failed {
+		fmt.Println("FAILED!")
+	} else {
+		fmt.Println("PASSED!")
+	}
+	if *destroy {
+		status, err := cc.BuildCluster(&testing.T{}, *tfVars, *destroy, terraformOptions)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(status).To(Equal("cluster destroyed"))
+	}
+})
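The hook change above tracks the Ginkgo v2 API: `CurrentGinkgoTestDescription()` from v1 is replaced by `CurrentSpecReport()`. A minimal sketch of the failure-tracking idiom both suites now share (hypothetical suite file, not part of the commit):

```go
package example_test // hypothetical _test file, for illustration only

import (
	"fmt"
	"testing"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

var failed = false

func TestExample(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Example Suite")
}

// Ginkgo v2: CurrentSpecReport() replaces v1's CurrentGinkgoTestDescription().
var _ = AfterEach(func() {
	failed = failed || CurrentSpecReport().Failed()
})

// AfterSuite sees the accumulated result once all specs have run.
var _ = AfterSuite(func() {
	if failed {
		fmt.Println("FAILED!")
	} else {
		fmt.Println("PASSED!")
	}
})
```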