Add make commands to terraform automation and fix external dbs related issue (#7159)

* test: add make commands and dependencies

Signed-off-by: Francisco <francisco.moral@suse.com>

* fix: fix issue on logic for using external dbs and dependencies

Signed-off-by: Francisco <francisco.moral@suse.com>

---------

Signed-off-by: Francisco <francisco.moral@suse.com>
fmoral2 2023-04-06 12:17:13 -03:00 committed by GitHub
parent 3e3512bdae
commit 6c394abb32
16 changed files with 775 additions and 356 deletions

.gitignore vendored

@ -29,3 +29,7 @@ __pycache__
/tests/.vscode
/sonobuoy-output
*.tmp
config/local.tfvars
*.terraform
*.tfstate
.terraform.lock.hcl


@ -0,0 +1,80 @@
linters:
enable:
- gofmt
- govet
- revive
- gosec
- megacheck
- misspell
- unparam
- exportloopref
- nlreturn
- nestif
- dupl
- gci
- ginkgolinter
linters-settings:
govet:
check-shadowing: true
check-tests: true
nestif:
min-complexity: 4
revive:
confidence: 0.8
severity: warning
ignore-generated-header: true
rules:
- name: line-length-limit
arguments: [100]
- name: cognitive-complexity
arguments: [10]
- name: empty-lines
- name: empty-block
- name: bare-return
- name: blank-imports
- name: confusing-naming
- name: confusing-results
- name: context-as-argument
- name: duplicated-imports
- name: early-return
- name: empty-block
- name: empty-lines
- name: error-naming
- name: error-return
- name: error-strings
- name: errorf
- name: exported
- name: flag-parameter
- name: get-return
- name: if-return
- name: increment-decrement
- name: indent-error-flow
- name: import-shadowing
- name: modifies-parameter
- name: modifies-value-receiver
- name: range
- name: range-val-in-closure
- name: range-val-address
- name: receiver-naming
- name: string-of-int
- name: struct-tag
- name: superfluous-else
- name: time-naming
- name: var-declaration
- name: unconditional-recursion
- name: unexported-naming
- name: unexported-return
- name: unhandled-error
arguments: ["fmt.Printf", "builder.WriteString"]
- name: unnecessary-stmt
- name: unreachable-code
- name: unused-parameter
- name: unused-receiver
issues:
exclude-rules:
- linters: [typecheck]
text: "command-line-arguments"


@ -1,26 +1,27 @@
FROM golang:alpine
ARG TERRAFORM_VERSION=0.12.10
ENV TERRAFORM_VERSION=$TERRAFORM_VERSION
ARG TF_VERSION=1.4.0
ENV TERRAFORM_VERSION $TF_VERSION
RUN apk update && \
apk upgrade --update-cache --available && \
apk add curl git jq bash openssh unzip gcc g++ make ca-certificates && \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
apk add --no-cache curl git jq bash openssh unzip gcc g++ make ca-certificates && \
if [ "$(uname -m)" = "aarch64" ]; then \
KUBE_ARCH="linux/arm64" && \
TF_ARCH="linux_arm64"; \
else \
KUBE_ARCH="linux/amd64" && \
TF_ARCH="linux_amd64"; \
fi && \
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/${KUBE_ARCH}/kubectl" && \
chmod +x ./kubectl && \
mv ./kubectl /usr/local/bin && \
mkdir tmp && \
curl "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" -o tmp/terraform.zip && \
curl "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_${TF_ARCH}.zip" -o tmp/terraform.zip && \
unzip tmp/terraform.zip -d /usr/local/bin && \
chmod +x /usr/local/bin/terraform && \
rm -rf tmp
WORKDIR $GOPATH/src/github.com/k3s-io/k3s
COPY . .
RUN go get github.com/gruntwork-io/terratest/modules/terraform
RUN go get -u github.com/onsi/gomega
RUN go get -u github.com/onsi/ginkgo/v2
RUN go get -u golang.org/x/crypto/...
RUN go get -u github.com/Thatooine/go-test-html-report
COPY . .

tests/terraform/Makefile Normal file

@ -0,0 +1,75 @@
##========================= Terraform Tests =========================#
include ./config.mk
TAGNAME ?= default
tf-up:
@cd ../.. && docker build . -q -f ./tests/terraform/Dockerfile.build -t k3s-tf-${TAGNAME}
.PHONY: tf-run
tf-run:
@docker run -d --name k3s-tf-test${IMGNAME} -t \
-e AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" \
-e AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" \
-v ${ACCESS_KEY_LOCAL}:/go/src/github.com/k3s-io/k3s/tests/terraform/modules/k3scluster/config/.ssh/aws_key.pem \
k3s-tf-${TAGNAME} sh -c 'cd ./tests/terraform ; \
if [ -n "${ARGNAME}" ]; then \
go test -v -timeout=45m \
./${TESTDIR}/... \
-"${ARGNAME}"="${ARGVALUE}"; \
elif [ -z "${TESTDIR}" ]; then \
go test -v -timeout=45m \
./createcluster/...; \
else \
go test -v -timeout=45m \
./${TESTDIR}/...; \
fi'
.PHONY: tf-logs
tf-logs:
@docker logs -f k3s-tf-test${IMGNAME}
.PHONY: tf-down
tf-down:
@echo "Removing containers and images"
@docker stop $$(docker ps -a -q --filter="name=k3s-tf*")
@docker rm $$(docker ps -a -q --filter="name=k3s-tf*") ; \
docker rmi --force $$(docker images -q --filter="reference=k3s-tf*")
tf-clean:
@./scripts/delete_resources.sh
.PHONY: tf-complete
tf-complete: tf-clean tf-down tf-remove-state tf-up tf-run
#========================= Run terraform tests locally =========================#
.PHONY: tf-create
tf-create:
@go test -timeout=45m -v ./createcluster/...
.PHONY: tf-upgrade
tf-upgrade:
@go test -timeout=45m -v ./upgradecluster/... -${ARGNAME}=${ARGVALUE}
.PHONY: tf-remove-state
tf-remove-state:
@rm -rf ./modules/k3scluster/.terraform
@rm -rf ./modules/k3scluster/.terraform.lock.hcl ./modules/k3scluster/terraform.tfstate ./modules/k3scluster/terraform.tfstate.backup
.PHONY: tf-test-suite
tf-test-suite:
@make tf-remove-state && make tf-create ; sleep 5 && \
make tf-remove-state && make tf-upgrade ${ARGNAME}=${ARGVALUE}
.PHONY: tf-test-suite-same-cluster
tf-test-suite-same-cluster:
@make tf-create ; sleep 5 && make tf-upgrade ${ARGNAME}=${ARGVALUE}
#========================= TestCode Static Quality Check =========================#
.PHONY: vet-lint ## Run locally only inside Tests package
vet-lint:
@echo "Running go vet and lint"
@go vet ./${TESTDIR} && golangci-lint run --tests


@ -14,11 +14,24 @@ See the [create cluster test](../tests/terraform/createcluster_test.go) as an ex
## Running
Before running the tests, it's best to create a tfvars file in `./tests/terraform/modules/k3scluster/config/local.tfvars`. There is some information there to get you started, but the empty variables should be filled in appropriately per your AWS environment.
- Before running the tests, you should create a local.tfvars file in `./tests/terraform/modules/k3scluster/config/local.tfvars`. There is some information there to get you started, but the empty variables should be filled in appropriately per your AWS environment.
- To run tests with the "etcd" cluster type, set the variable "cluster_type" to "etcd". You also need to have at least the following variables present, even if empty:
```
- external_db
- external_db_version
- instance_class
- db_group_name
```
- To run with an external DB, fill in the same variables above with the correct data and set cluster_type = "" (see the sketch below).
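For illustration only, a minimal local.tfvars sketch for an external MySQL datastore could look like the following (every value below is a placeholder, not a default shipped with this repo):
```
cluster_type        = ""
external_db         = "mysql"
external_db_version = "8.0.32"
instance_class      = "db.t3.medium"
db_group_name       = "default.mysql8.0"
resource_name       = "myrun"
node_os             = "ubuntu"
no_of_server_nodes  = 2
no_of_worker_nodes  = 1
```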
All TF tests can be run with:
```bash
go test -timeout=60m ./tests/terrfaorm/... -run TF
go test -timeout=60m ./tests/terraform/... -run TF
```
Tests can be run individually with:
```bash
@ -27,10 +40,79 @@ go test -timeout=30m ./tests/terraform/createcluster/createcluster.go ./tests/te
go test -v -timeout=30m ./tests/terraform/... -run TFClusterCreateValidation
# example with vars:
go test -timeout=30m -v ./tests/terraform/createcluster.go ./tests/terraform/createcluster_test.go -node_os=ubuntu -aws_ami=ami-02f3416038bdb17fb -cluster_type=etcd -resource_name=localrun1 -sshuser=ubuntu -sshkey="key-name" -destroy=false
```
Test Flags:
```
- ${upgradeVersion} the version to upgrade the cluster to
```
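For example, the upgrade version can also be passed straight to `go test` using the flag name defined in the upgrade suite (the version below is only an example):
```bash
go test -timeout=45m -v ./tests/terraform/upgradecluster/... -upgradeVersion=v1.26.2+k3s1
```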
We can also run tests through the Makefile from the tests' directory:
- On the first run with make and docker, please delete your .terraform folder, terraform.tfstate and .terraform.lock.hcl files
```bash
Args:
*All args are optional and can be used with:
`$make tf-run` `$make tf-logs`,
`$make vet-lint` `$make tf-complete`,
`$make tf-upgrade` `$make tf-test-suite-same-cluster`,
`$make tf-test-suite`
- ${IMGNAME} append any string to the end of image name
- ${TAGNAME} append any string to the end of tag name
- ${ARGNAME} name of the arg to pass to the test
- ${ARGVALUE} value of the arg to pass to the test
- ${TESTDIR} path to the test directory
Commands:
$ make tf-up # create the image from Dockerfile.build
$ make tf-run # runs all tests if no flags or args provided
$ make tf-down # removes the image
$ make tf-clean # removes instances and resources created by tests
$ make tf-logs # prints logs from the test container
$ make tf-complete # clean resources + remove images + run tests
$ make tf-create # runs create cluster test locally
$ make tf-upgrade # runs upgrade cluster test locally
$ make tf-test-suite-same-cluster # runs all tests locally in sequence using the same state
$ make tf-remove-state # removes terraform state dir and files
$ make tf-test-suite # runs all tests locally in sequence not using the same state
$ make vet-lint # runs go vet and go lint
Examples:
$ make tf-up TAGNAME=ubuntu
$ make tf-run IMGNAME=2 TAGNAME=ubuntu TESTDIR=upgradecluster ARGNAME=upgradeVersion ARGVALUE=v1.26.2+k3s1
$ make tf-run TESTDIR=upgradecluster
$ make tf-logs IMGNAME=1
$ make vet-lint TESTDIR=upgradecluster
```
In between tests, if the cluster is not destroyed and you want to create a new one, make sure to delete the ./tests/terraform/terraform.tfstate file.
# Running tests in parallel:
- You can mix and match many different test combinations, for example (see the sketch after this list):
```
- Build docker images with different TAGNAMEs (one per OS) and different configurations (resource_name, node_os, versions, install type, number of nodes, etc.), each with a unique IMGNAME
- Meanwhile, also run tests locally with yet another configuration while the containers for those TAGNAMEs and IMGNAMEs are running
```
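A sketch of what that could look like in practice, using only the make targets and variables documented above (the tags, image names, test dirs and version are examples):
```bash
# containerized run 1: ubuntu-tagged image running the create cluster suite
make tf-up TAGNAME=ubuntu
make tf-run IMGNAME=1 TAGNAME=ubuntu TESTDIR=createcluster

# containerized run 2: another image/tag running the upgrade suite with an upgrade version
make tf-up TAGNAME=sles
make tf-run IMGNAME=2 TAGNAME=sles TESTDIR=upgradecluster ARGNAME=upgradeVersion ARGVALUE=v1.26.2+k3s1

# meanwhile, a local run using whatever is in your local.tfvars
make tf-create
```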
# In between tests:
- If you want to run against the same cluster, do not delete the ./tests/terraform/modules/terraform.tfstate and .terraform.lock.hcl files after each test.
- If you want to use new resources and create a new cluster, make sure to delete the ./tests/terraform/modules/terraform.tfstate and .terraform.lock.hcl files.
# Common Issues:
- For issues related to the terraform plugin, please also delete the modules/.terraform folder
- On Mac M1 you may also need to go to the tests/terraform/modules directory and run `terraform init` to download the plugins
# Reporting:
Additionally, to generate junit reporting for the tests, the Ginkgo CLI is used. Installation instructions can be found [here.](https://onsi.github.io/ginkgo/#getting-started)
To run all the TF tests and generate JUnit testing reports:
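A minimal sketch of such a command, assuming Ginkgo v2 is installed and a report file name of your choosing:
```bash
ginkgo --junit-report=report.xml ./tests/terraform/...
```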

tests/terraform/config.mk Normal file

@ -0,0 +1,13 @@
SHELL := /bin/bash
LOCAL_TFVARS_PATH := modules/k3scluster/config/local.tfvars
ifeq ($(wildcard ${LOCAL_TFVARS_PATH}),)
RESOURCE_NAME :=
else
export RESOURCE_NAME := $(shell sed -n 's/resource_name *= *"\([^"]*\)"/\1/p' ${LOCAL_TFVARS_PATH})
endif
export ACCESS_KEY_LOCAL
export AWS_ACCESS_KEY_ID
export AWS_SECRET_ACCESS_KEY


@ -2,6 +2,7 @@ package createcluster
import (
"fmt"
"strconv"
"path/filepath"
"testing"
@ -11,59 +12,52 @@ import (
)
var (
KubeConfigFile string
MasterIPs string
WorkerIPs string
KubeConfigFile string
MasterIPs string
WorkerIPs string
NumServers int
NumWorkers int
AwsUser string
AccessKey string
RenderedTemplate string
ExternalDb string
ClusterType string
TfVarsPath = "/tests/terraform/modules/k3scluster/config/local.tfvars"
modulesPath = "/tests/terraform/modules/k3scluster"
)
type options struct {
nodeOs string
awsAmi string
clusterType string
resourceName string
externalDb string
sshuser string
sshkey string
accessKey string
serverNodes int
workerNodes int
}
func ClusterOptions(os ...ClusterOption) map[string]interface{} {
opts := options{}
for _, o := range os {
opts = o(opts)
}
return map[string]interface{}{
"node_os": opts.nodeOs,
"aws_ami": opts.awsAmi,
"cluster_type": opts.clusterType,
"resource_name": opts.resourceName,
"external_db": opts.externalDb,
"aws_user": opts.sshuser,
"key_name": opts.sshkey,
"access_key": opts.accessKey,
"no_of_server_nodes": opts.serverNodes,
"no_of_worker_nodes": opts.workerNodes,
}
}
func BuildCluster(t *testing.T, tfVarsPath string, destroy bool, terraformVars map[string]interface{}) (string, error) {
func BuildCluster(t *testing.T, destroy bool) (string, error) {
basepath := tf.GetBasepath()
tfDir, err := filepath.Abs(basepath + "/tests/terraform/modules/k3scluster")
tfDir, err := filepath.Abs(basepath + modulesPath)
if err != nil {
return "", err
}
varDir, err := filepath.Abs(basepath + tfVarsPath)
varDir, err := filepath.Abs(basepath + TfVarsPath)
if err != nil {
return "", err
}
TerraformOptions := &terraform.Options{
TerraformDir: tfDir,
VarFiles: []string{varDir},
Vars: terraformVars,
}
NumServers, err = strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir,
"no_of_server_nodes"))
if err != nil {
return "", err
}
NumWorkers, err = strconv.Atoi(terraform.GetVariableAsStringFromVarFile(t, varDir,
"no_of_worker_nodes"))
if err != nil {
return "", err
}
ClusterType = terraform.GetVariableAsStringFromVarFile(t, varDir, "cluster_type")
ExternalDb = terraform.GetVariableAsStringFromVarFile(t, varDir, "external_db")
AwsUser = terraform.GetVariableAsStringFromVarFile(t, varDir, "aws_user")
AccessKey = terraform.GetVariableAsStringFromVarFile(t, varDir, "access_key")
if destroy {
fmt.Printf("Cluster is being deleted")
terraform.Destroy(t, TerraformOptions)
@ -71,72 +65,12 @@ func BuildCluster(t *testing.T, tfVarsPath string, destroy bool, terraformVars m
}
fmt.Printf("Creating Cluster")
terraform.InitAndApply(t, TerraformOptions)
KubeConfigFile = "/tmp/" + terraform.Output(t, TerraformOptions, "kubeconfig") + "_kubeconfig"
MasterIPs = terraform.Output(t, TerraformOptions, "master_ips")
WorkerIPs = terraform.Output(t, TerraformOptions, "worker_ips")
RenderedTemplate = terraform.Output(t, TerraformOptions, "rendered_template")
return "cluster created", err
}
type ClusterOption func(o options) options
func NodeOs(n string) ClusterOption {
return func(o options) options {
o.nodeOs = n
return o
}
}
func AwsAmi(n string) ClusterOption {
return func(o options) options {
o.awsAmi = n
return o
}
}
func ClusterType(n string) ClusterOption {
return func(o options) options {
o.clusterType = n
return o
}
}
func ResourceName(n string) ClusterOption {
return func(o options) options {
o.resourceName = n
return o
}
}
func ExternalDb(n string) ClusterOption {
return func(o options) options {
o.externalDb = n
return o
}
}
func Sshuser(n string) ClusterOption {
return func(o options) options {
o.sshuser = n
return o
}
}
func Sshkey(n string) ClusterOption {
return func(o options) options {
o.sshkey = n
return o
}
}
func AccessKey(n string) ClusterOption {
return func(o options) options {
o.accessKey = n
return o
}
}
func ServerNodes(n int) ClusterOption {
return func(o options) options {
o.serverNodes = n
return o
}
}
func WorkerNodes(n int) ClusterOption {
return func(o options) options {
o.workerNodes = n
return o
}
}


@ -7,27 +7,16 @@ import (
"testing"
tf "github.com/k3s-io/k3s/tests/terraform"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var awsAmi = flag.String("aws_ami", "", "a valid ami string like ami-abcxyz123")
var nodeOs = flag.String("node_os", "ubuntu", "a string")
var externalDb = flag.String("external_db", "mysql", "a string")
var arch = flag.String("arch", "amd64", "a string")
var clusterType = flag.String("cluster_type", "etcd", "a string")
var resourceName = flag.String("resource_name", "etcd", "a string")
var sshuser = flag.String("sshuser", "ubuntu", "a string")
var sshkey = flag.String("sshkey", "", "a string")
var accessKey = flag.String("access_key", "", "local path to the private sshkey")
var serverNodes = flag.Int("no_of_server_nodes", 2, "count of server nodes")
var workerNodes = flag.Int("no_of_worker_nodes", 1, "count of worker nodes")
var tfVars = flag.String("tfvars", "/tests/terraform/modules/k3scluster/config/local.tfvars", "custom .tfvars file from base project path")
var destroy = flag.Bool("destroy", false, "a bool")
var failed = false
var terraformOptions map[string]interface{}
var (
arch = flag.String("arch", "amd64", "a string")
destroy = flag.Bool("destroy", false, "a bool")
failed = false
)
func Test_TFClusterCreateValidation(t *testing.T) {
RegisterFailHandler(Fail)
@ -36,27 +25,42 @@ func Test_TFClusterCreateValidation(t *testing.T) {
RunSpecs(t, "Create Cluster Test Suite")
}
var _ = BeforeSuite(func() {
terraformOptions = ClusterOptions(NodeOs(*nodeOs), AwsAmi(*awsAmi), ClusterType(*clusterType), ExternalDb(*externalDb), ResourceName(*resourceName), AccessKey(*accessKey), Sshuser(*sshuser), ServerNodes(*serverNodes), WorkerNodes(*workerNodes), Sshkey(*sshkey))
})
var _ = Describe("Test:", func() {
Context("Build Cluster:", func() {
It("Starts up with no issues", func() {
status, err := BuildCluster(&testing.T{}, *tfVars, false, terraformOptions)
status, err := BuildCluster(&testing.T{}, false)
Expect(err).NotTo(HaveOccurred())
Expect(status).To(Equal("cluster created"))
defer GinkgoRecover()
if strings.Contains(*clusterType, "etcd") {
fmt.Println("\nCLUSTER CONFIG:\nOS", *nodeOs, "\nBACKEND", *clusterType)
if strings.Contains(ClusterType, "etcd") {
fmt.Println("Backend:", ClusterType)
} else {
fmt.Println("\nCLUSTER CONFIG:\nOS", *nodeOs, "\nBACKEND", *externalDb)
fmt.Println("Backend:", ExternalDb)
}
fmt.Printf("\nIPs:\n")
if ExternalDb != "" && ClusterType == "" {
// MasterIPs is expected to be a comma-separated list of server IPs
for _, masterIP := range strings.Split(MasterIPs, ",") {
cmd := "grep \"datastore-endpoint\" /etc/systemd/system/k3s.service"
res, err := tf.RunCmdOnNode(cmd, strings.TrimSpace(masterIP), AwsUser, AccessKey)
Expect(err).NotTo(HaveOccurred())
Expect(res).Should(ContainSubstring(RenderedTemplate))
}
}
tf.PrintFileContents(KubeConfigFile)
Expect(KubeConfigFile).ShouldNot(BeEmpty())
Expect(MasterIPs).ShouldNot(BeEmpty())
fmt.Println("Server Node IPS:", MasterIPs)
fmt.Println("Agent Node IPS:", WorkerIPs)
fmt.Println(KubeConfigFile)
Expect(KubeConfigFile).Should(ContainSubstring(*resourceName))
if NumWorkers > 0 {
Expect(WorkerIPs).ShouldNot(BeEmpty())
} else {
Expect(WorkerIPs).Should(BeEmpty())
}
})
It("Checks Node and Pod Status", func() {
@ -71,21 +75,27 @@ var _ = Describe("Test:", func() {
}
}()
expectedNodeCount := NumServers + NumWorkers
fmt.Printf("\nFetching node status\n")
expectedNodeCount := *serverNodes + *workerNodes + 1
Eventually(func(g Gomega) {
nodes, err := tf.ParseNodes(KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec")
Expect(err).NotTo(HaveOccurred())
Expect(len(nodes)).To(Equal(expectedNodeCount),
"Number of nodes should match the spec")
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"), "Nodes should all be in Ready state")
g.Expect(node.Status).Should(Equal("Ready"),
"Nodes should all be in Ready state")
}
}, "420s", "5s").Should(Succeed())
}, "600s", "5s").Should(Succeed())
fmt.Printf("\nFetching pod status\n")
Eventually(func(g Gomega) {
pods, err := tf.ParsePods(KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
@ -102,18 +112,22 @@ var _ = Describe("Test:", func() {
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip" +
" --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should((ContainSubstring("test-clusterip")))
g.Expect(res).Should(ContainSubstring("test-clusterip"))
}, "420s", "5s").Should(Succeed())
clusterip, _ := tf.FetchClusterIP(KubeConfigFile, "nginx-clusterip-svc")
cmd := "curl -sL --insecure http://" + clusterip + "/name.html"
nodeExternalIP := tf.FetchNodeExternalIP(KubeConfigFile)
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
res, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
res, err := tf.RunCmdOnNode(cmd, ip, AwsUser, AccessKey)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-clusterip"))
}, "420s", "10s").Should(Succeed())
@ -123,15 +137,20 @@ var _ = Describe("Test:", func() {
It("Verifies NodePort Service", func() {
_, err := tf.DeployWorkload("nodeport.yaml", KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
nodeExternalIP := tf.FetchNodeExternalIP(KubeConfigFile)
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + KubeConfigFile +
" --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport " +
"--field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-nodeport"))
}, "240s", "5s").Should(Succeed())
@ -139,6 +158,7 @@ var _ = Describe("Test:", func() {
cmd = "curl -sL --insecure http://" + ip + ":" + nodeport + "/name.html"
Eventually(func(g Gomega) {
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-nodeport"))
}, "240s", "5s").Should(Succeed())
@ -148,15 +168,20 @@ var _ = Describe("Test:", func() {
It("Verifies LoadBalancer Service", func() {
_, err := tf.DeployWorkload("loadbalancer.yaml", KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
nodeExternalIP := tf.FetchNodeExternalIP(KubeConfigFile)
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
port, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
for _, ip := range nodeExternalIP {
nodeExternalIP := tf.FetchNodeExternalIP(KubeConfigFile)
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + KubeConfigFile +
" --output jsonpath=\"{.spec.ports[0].port}\""
port, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer" +
" --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
@ -164,6 +189,7 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
cmd = "curl -sL --insecure http://" + ip + ":" + port + "/name.html"
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
@ -175,8 +201,10 @@ var _ = Describe("Test:", func() {
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress " +
"--field-selector=status.phase=Running --kubeconfig=" + KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-ingress"))
}, "240s", "5s").Should(Succeed())
@ -203,7 +231,8 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
count := tf.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
g.Expect(len(nodes)).Should(Equal(count),
"Daemonset pod count does not match node count")
}, "420s", "10s").Should(Succeed())
})
@ -214,6 +243,7 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("local-path-pvc"))
g.Expect(res).Should(ContainSubstring("Bound"))
@ -222,12 +252,14 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + KubeConfigFile
res, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
cmd := "kubectl --kubeconfig=" + KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
cmd := "kubectl --kubeconfig=" + KubeConfigFile +
" exec volume-test -- sh -c 'echo local-path-test > /data/test'"
_, err = tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
@ -235,12 +267,15 @@ var _ = Describe("Test:", func() {
_, err = tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
_, err = tf.DeployWorkload("local-path-provisioner.yaml", KubeConfigFile, *arch)
_, err = tf.DeployWorkload("local-path-provisioner.yaml",
KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + KubeConfigFile
cmd := "kubectl get pods -o=name -l app=local-path-provisioner" +
" --field-selector=status.phase=Running -n kube-system --kubeconfig=" + KubeConfigFile
res, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("pod/local-path-provisioner"))
}, "420s", "2s").Should(Succeed())
@ -248,6 +283,7 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
@ -256,6 +292,7 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
cmd := "kubectl exec volume-test --kubeconfig=" + KubeConfigFile + " -- cat /data/test"
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("local-path-test"))
}, "180s", "2s").Should(Succeed())
@ -268,12 +305,14 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods dnsutils --kubeconfig=" + KubeConfigFile
res, _ := tf.RunCommand(cmd)
g.Expect(res).Should(ContainSubstring("dnsutils"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl --kubeconfig=" + KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
cmd := "kubectl --kubeconfig=" + KubeConfigFile +
" exec -t dnsutils -- nslookup kubernetes.default"
res, _ := tf.RunCommand(cmd)
g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
}, "420s", "2s").Should(Succeed())
@ -282,7 +321,11 @@ var _ = Describe("Test:", func() {
})
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
if CurrentSpecReport().Failed() {
fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText())
} else {
fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText())
}
})
var _ = BeforeEach(func() {
@ -293,13 +336,8 @@ var _ = BeforeEach(func() {
})
var _ = AfterSuite(func() {
if failed {
fmt.Println("FAILED!")
} else {
fmt.Println("PASSED!")
}
if *destroy {
status, err := BuildCluster(&testing.T{}, *tfVars, *destroy, terraformOptions)
status, err := BuildCluster(&testing.T{}, *destroy)
Expect(err).NotTo(HaveOccurred())
Expect(status).To(Equal("cluster destroyed"))
}


@ -25,7 +25,7 @@ export "${3}"="${4}"
if [ "${5}" = "etcd" ]
then
echo "CLUSTER TYPE is etcd"
if [[ "$4" == *"v1.18"* ]] || [["$4" == *"v1.17"* ]] && [[ -n "$8" ]]
if [[ "$4" == *"v1.18"* ]] || [[ "$4" == *"v1.17"* ]] && [[ -n "$8" ]]
then
echo "curl -sfL https://get.k3s.io | INSTALL_K3S_TYPE='server' sh -s - --cluster-init --node-external-ip=${6} $8" >/tmp/master_cmd
curl -sfL https://get.k3s.io | INSTALL_K3S_TYPE='server' sh -s - --cluster-init --node-external-ip="${6}" "$8"
@ -35,7 +35,6 @@ then
fi
else
echo "CLUSTER TYPE is external db"
echo "$8"
if [[ "$4" == *"v1.18"* ]] || [[ "$4" == *"v1.17"* ]] && [[ -n "$8" ]]
then
echo "curl -sfL https://get.k3s.io | sh -s - server --node-external-ip=${6} --datastore-endpoint=\"${7}\" $8" >/tmp/master_cmd


@ -1,12 +1,12 @@
resource "aws_db_instance" "db" {
count = (var.cluster_type == "etcd" ? 0 : (var.external_db != "aurora-mysql" ? 1 : 0))
identifier = "${var.resource_name}-db"
allocated_storage = 20
count = (var.cluster_type == "etcd" || var.external_db == "" || var.external_db == "NULL" ? 0 : (var.external_db != "" && var.external_db != "aurora-mysql" ? 1 : 0))
identifier = "${var.resource_name}${local.random_string}-db"
storage_type = "gp2"
allocated_storage = 20
engine = var.external_db
engine_version = var.external_db_version
instance_class = var.instance_class
name = "mydb"
db_name = "mydb"
parameter_group_name = var.db_group_name
username = var.db_username
password = var.db_password
@ -18,8 +18,8 @@ resource "aws_db_instance" "db" {
}
resource "aws_rds_cluster" "db" {
count = (var.external_db == "aurora-mysql" ? 1 : 0)
cluster_identifier = "${var.resource_name}-db"
count = (var.external_db == "aurora-mysql" && var.cluster_type == "" ? 1 : 0)
cluster_identifier = "${var.resource_name}${local.random_string}-db"
engine = var.external_db
engine_version = var.external_db_version
availability_zones = [var.availability_zone]
@ -34,9 +34,9 @@ resource "aws_rds_cluster" "db" {
}
resource "aws_rds_cluster_instance" "db" {
count = (var.external_db == "aurora-mysql" ? 1 : 0)
cluster_identifier = "${aws_rds_cluster.db[0].id}"
identifier = "${var.resource_name}-instance1"
count = (var.external_db == "aurora-mysql" && var.cluster_type == "" ? 1 : 0)
cluster_identifier = aws_rds_cluster.db[0].id
identifier = "${var.resource_name}${local.random_string}-instance1"
instance_class = var.instance_class
engine = aws_rds_cluster.db[0].engine
engine_version = aws_rds_cluster.db[0].engine_version
@ -69,7 +69,7 @@ resource "aws_instance" "master" {
provisioner "remote-exec" {
inline = [
"chmod +x /tmp/install_k3s_master.sh",
"sudo /tmp/install_k3s_master.sh ${var.node_os} ${var.create_lb ? aws_route53_record.aws_route53[0].fqdn : "${aws_instance.master.public_ip}"} ${var.install_mode} ${var.k3s_version} ${var.cluster_type} ${self.public_ip} \"${data.template_file.test.rendered}\" \"${var.server_flags}\" ${var.username} ${var.password}",
"sudo /tmp/install_k3s_master.sh ${var.node_os} ${var.create_lb ? aws_route53_record.aws_route53[0].fqdn : self.public_ip} ${var.install_mode} ${var.k3s_version} ${var.cluster_type == "" ? var.external_db : "etcd"} ${self.public_ip} \"${data.template_file.test.rendered}\" \"${var.server_flags}\" ${var.username} ${var.password}",
]
}
provisioner "local-exec" {
@ -93,27 +93,36 @@ resource "aws_instance" "master" {
}
data "template_file" "test" {
template = (var.cluster_type == "etcd" ? "NULL": (var.external_db == "postgres" ? "postgres://${aws_db_instance.db[0].username}:${aws_db_instance.db[0].password}@${aws_db_instance.db[0].endpoint}/${aws_db_instance.db[0].name}" : (var.external_db == "aurora-mysql" ? "mysql://${aws_rds_cluster.db[0].master_username}:${aws_rds_cluster.db[0].master_password}@tcp(${aws_rds_cluster.db[0].endpoint})/${aws_rds_cluster.db[0].database_name}" : "mysql://${aws_db_instance.db[0].username}:${aws_db_instance.db[0].password}@tcp(${aws_db_instance.db[0].endpoint})/${aws_db_instance.db[0].name}")))
template = (var.cluster_type == "etcd" ? "NULL": (var.external_db == "postgres" ? "postgres://${aws_db_instance.db[0].username}:${aws_db_instance.db[0].password}@${aws_db_instance.db[0].endpoint}/${aws_db_instance.db[0].db_name}" : (var.external_db == "aurora-mysql" ? "mysql://${aws_rds_cluster.db[0].master_username}:${aws_rds_cluster.db[0].master_password}@tcp(${aws_rds_cluster.db[0].endpoint})/${aws_rds_cluster.db[0].database_name}" : "mysql://${aws_db_instance.db[0].username}:${aws_db_instance.db[0].password}@tcp(${aws_db_instance.db[0].endpoint})/${aws_db_instance.db[0].db_name}")))
depends_on = [data.template_file.test_status]
}
data "template_file" "test_status" {
template = (var.cluster_type == "etcd" ? "NULL": ((var.external_db == "postgres" ? aws_db_instance.db[0].endpoint : (var.external_db == "aurora-mysql" ? aws_rds_cluster_instance.db[0].endpoint : aws_db_instance.db[0].endpoint))))
}
data "local_file" "token" {
filename = "/tmp/${var.resource_name}_nodetoken"
depends_on = [aws_instance.master]
}
locals {
node_token = trimspace("${data.local_file.token.content}")
node_token = trimspace(data.local_file.token.content)
}
resource "random_string" "suffix" {
length = 3
upper = false
special = false
}
locals {
random_string = random_string.suffix.result
}
resource "aws_instance" "master2-ha" {
ami = var.aws_ami
instance_type = var.ec2_instance_class
count = var.no_of_server_nodes
count = var.no_of_server_nodes - 1
connection {
type = "ssh"
user = var.aws_user
@ -130,7 +139,7 @@ resource "aws_instance" "master2-ha" {
key_name = var.key_name
depends_on = [aws_instance.master]
tags = {
Name = "${var.resource_name}-servers"
Name = "${var.resource_name}-server-ha${count.index + 1}"
}
provisioner "file" {
source = "join_k3s_master.sh"
@ -148,8 +157,8 @@ resource "aws_lb_target_group" "aws_tg_80" {
count = var.create_lb ? 1 : 0
port = 80
protocol = "TCP"
vpc_id = "${var.vpc_id}"
name = "${var.resource_name}-tg-80"
vpc_id = var.vpc_id
name = "${var.resource_name}${local.random_string}-tg-80"
health_check {
protocol = "HTTP"
port = "traffic-port"
@ -164,16 +173,16 @@ resource "aws_lb_target_group" "aws_tg_80" {
resource "aws_lb_target_group_attachment" "aws_tg_attachment_80" {
count = var.create_lb ? 1 : 0
target_group_arn = "${aws_lb_target_group.aws_tg_80[0].arn}"
target_id = "${aws_instance.master.id}"
target_group_arn = aws_lb_target_group.aws_tg_80[0].arn
target_id = aws_instance.master.id
port = 80
depends_on = ["aws_instance.master"]
}
resource "aws_lb_target_group_attachment" "aws_tg_attachment_80_2" {
target_group_arn = "${aws_lb_target_group.aws_tg_80[0].arn}"
target_group_arn = aws_lb_target_group.aws_tg_80[0].arn
count = var.create_lb ? length(aws_instance.master2-ha) : 0
target_id = "${aws_instance.master2-ha[count.index].id}"
target_id = aws_instance.master2-ha[count.index].id
port = 80
depends_on = ["aws_instance.master"]
}
@ -182,8 +191,8 @@ resource "aws_lb_target_group" "aws_tg_443" {
count = var.create_lb ? 1 : 0
port = 443
protocol = "TCP"
vpc_id = "${var.vpc_id}"
name = "${var.resource_name}-tg-443"
vpc_id = var.vpc_id
name = "${var.resource_name}${local.random_string}-tg-443"
health_check {
protocol = "HTTP"
port = 80
@ -198,16 +207,16 @@ resource "aws_lb_target_group" "aws_tg_443" {
resource "aws_lb_target_group_attachment" "aws_tg_attachment_443" {
count = var.create_lb ? 1 : 0
target_group_arn = "${aws_lb_target_group.aws_tg_443[0].arn}"
target_id = "${aws_instance.master.id}"
target_group_arn = aws_lb_target_group.aws_tg_443[0].arn
target_id = aws_instance.master.id
port = 443
depends_on = ["aws_instance.master"]
}
resource "aws_lb_target_group_attachment" "aws_tg_attachment_443_2" {
target_group_arn = "${aws_lb_target_group.aws_tg_443[0].arn}"
target_group_arn = aws_lb_target_group.aws_tg_443[0].arn
count = var.create_lb ? length(aws_instance.master2-ha) : 0
target_id = "${aws_instance.master2-ha[count.index].id}"
target_id = aws_instance.master2-ha[count.index].id
port = 443
depends_on = ["aws_instance.master"]
}
@ -216,22 +225,22 @@ resource "aws_lb_target_group" "aws_tg_6443" {
count = var.create_lb ? 1 : 0
port = 6443
protocol = "TCP"
vpc_id = "${var.vpc_id}"
name = "${var.resource_name}-tg-6443"
vpc_id = var.vpc_id
name = "${var.resource_name}${local.random_string}-tg-6443"
}
resource "aws_lb_target_group_attachment" "aws_tg_attachment_6443" {
count = var.create_lb ? 1 : 0
target_group_arn = "${aws_lb_target_group.aws_tg_6443[0].arn}"
target_id = "${aws_instance.master.id}"
target_group_arn = aws_lb_target_group.aws_tg_6443[0].arn
target_id = aws_instance.master.id
port = 6443
depends_on = ["aws_instance.master"]
}
resource "aws_lb_target_group_attachment" "aws_tg_attachment_6443_2" {
target_group_arn = "${aws_lb_target_group.aws_tg_6443[0].arn}"
target_group_arn = aws_lb_target_group.aws_tg_6443[0].arn
count = var.create_lb ? length(aws_instance.master2-ha) : 0
target_id = "${aws_instance.master2-ha[count.index].id}"
target_id = aws_instance.master2-ha[count.index].id
port = 6443
depends_on = ["aws_instance.master"]
}
@ -240,54 +249,54 @@ resource "aws_lb" "aws_nlb" {
count = var.create_lb ? 1 : 0
internal = false
load_balancer_type = "network"
subnets = ["${var.subnets}"]
name = "${var.resource_name}-nlb"
subnets = [var.subnets]
name = "${var.resource_name}${local.random_string}-nlb"
}
resource "aws_lb_listener" "aws_nlb_listener_80" {
count = var.create_lb ? 1 : 0
load_balancer_arn = "${aws_lb.aws_nlb[0].arn}"
load_balancer_arn = aws_lb.aws_nlb[0].arn
port = "80"
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = "${aws_lb_target_group.aws_tg_80[0].arn}"
target_group_arn = aws_lb_target_group.aws_tg_80[0].arn
}
}
resource "aws_lb_listener" "aws_nlb_listener_443" {
count = var.create_lb ? 1 : 0
load_balancer_arn = "${aws_lb.aws_nlb[0].arn}"
load_balancer_arn = aws_lb.aws_nlb[0].arn
port = "443"
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = "${aws_lb_target_group.aws_tg_443[0].arn}"
target_group_arn = aws_lb_target_group.aws_tg_443[0].arn
}
}
resource "aws_lb_listener" "aws_nlb_listener_6443" {
count = var.create_lb ? 1 : 0
load_balancer_arn = "${aws_lb.aws_nlb[0].arn}"
load_balancer_arn = aws_lb.aws_nlb[0].arn
port = "6443"
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = "${aws_lb_target_group.aws_tg_6443[0].arn}"
target_group_arn = aws_lb_target_group.aws_tg_6443[0].arn
}
}
resource "aws_route53_record" "aws_route53" {
count = var.create_lb ? 1 : 0
zone_id = "${data.aws_route53_zone.selected.zone_id}"
name = "${var.resource_name}"
zone_id = data.aws_route53_zone.selected.zone_id
name = "${var.resource_name}${local.random_string}-r53"
type = "CNAME"
ttl = "300"
records = ["${aws_lb.aws_nlb[0].dns_name}"]
records = [aws_lb.aws_nlb[0].dns_name]
depends_on = ["aws_lb_listener.aws_nlb_listener_6443"]
}
data "aws_route53_zone" "selected" {
name = "${var.qa_space}"
name = var.qa_space
private_zone = false
}
}


@ -12,3 +12,7 @@ output "kubeconfig" {
value = var.resource_name
description = "kubeconfig of the cluster created"
}
output "rendered_template" {
value = data.template_file.test.rendered
}


@ -10,11 +10,9 @@ variable "qa_space" {}
variable "ec2_instance_class" {}
variable "resource_name" {}
variable "key_name" {}
variable "external_db" {}
variable "external_db_version" {}
variable "instance_class" {}
variable "db_group_name" {}
variable "username" {}
variable "password" {}


@ -12,3 +12,7 @@ output "kubeconfig" {
value = module.master.kubeconfig
description = "kubeconfig of the cluster created"
}
output "rendered_template" {
value = module.master.rendered_template
}


@ -0,0 +1,101 @@
#!/bin/bash
#Get resource name from tfvarslocal && change name to make more sense in this context
RESOURCE_NAME=$(grep resource_name <modules/k3scluster/config/local.tfvars | cut -d= -f2 | tr -d ' "')
NAME_PREFIX="$RESOURCE_NAME"
#Terminate the instances
echo "Terminating resources for $NAME_PREFIX if still up and running"
# shellcheck disable=SC2046
aws ec2 terminate-instances --instance-ids $(aws ec2 describe-instances \
--filters "Name=tag:Name,Values=${NAME_PREFIX}*" \
"Name=instance-state-name,Values=running" --query \
'Reservations[].Instances[].InstanceId' --output text) > /dev/null 2>&1
#Search for DB instances and delete them
INSTANCES=$(aws rds describe-db-instances --query "DBInstances[?starts_with(DBInstanceIdentifier,
'${NAME_PREFIX}')].DBInstanceIdentifier" --output text 2> /dev/null)
for instance in $INSTANCES; do
aws rds delete-db-instance --db-instance-identifier "$instance" --skip-final-snapshot > /dev/null 2>&1
done
#Search for DB clusters and delete them
CLUSTERS=$(aws rds describe-db-clusters --query "DBClusters[?starts_with(DBClusterIdentifier,
'${NAME_PREFIX}')].DBClusterIdentifier" --output text 2> /dev/null)
for cluster in $CLUSTERS; do
aws rds delete-db-cluster --db-cluster-identifier "$cluster" --skip-final-snapshot > /dev/null 2>&1
aws rds wait db-cluster-deleted --db-cluster-identifier "$cluster"
done
#Get the list of load balancer ARNs
LB_ARN_LIST=$(aws elbv2 describe-load-balancers \
--query "LoadBalancers[?starts_with(LoadBalancerName, '${NAME_PREFIX}') && Type=='network'].LoadBalancerArn" \
--output text)
#Loop through the load balancer ARNs and delete the load balancers
for LB_ARN in $LB_ARN_LIST; do
echo "Deleting load balancer $LB_ARN"
aws elbv2 delete-load-balancer --load-balancer-arn "$LB_ARN"
done
#Get the list of target group ARNs
TG_ARN_LIST=$(aws elbv2 describe-target-groups \
--query "TargetGroups[?starts_with(TargetGroupName, '${NAME_PREFIX}') && Protocol=='TCP'].TargetGroupArn" \
--output text)
#Loop through the target group ARNs and delete the target groups
for TG_ARN in $TG_ARN_LIST; do
echo "Deleting target group $TG_ARN"
aws elbv2 delete-target-group --target-group-arn "$TG_ARN"
done
#Get the ID and recordName with lower case of the hosted zone that contains the Route 53 record sets
NAME_PREFIX_LOWER=$(echo "$NAME_PREFIX" | tr '[:upper:]' '[:lower:]')
R53_ZONE_ID=$(aws route53 list-hosted-zones-by-name --dns-name "${NAME_PREFIX}." \
--query "HostedZones[0].Id" --output text)
R53_RECORD=$(aws route53 list-resource-record-sets \
--hosted-zone-id "${R53_ZONE_ID}" \
--query "ResourceRecordSets[?starts_with(Name, '${NAME_PREFIX_LOWER}.') && Type == 'CNAME'].Name" \
--output text)
#Get ResourceRecord Value
RECORD_VALUE=$(aws route53 list-resource-record-sets \
--hosted-zone-id "${R53_ZONE_ID}" \
--query "ResourceRecordSets[?starts_with(Name, '${NAME_PREFIX_LOWER}.') \
&& Type == 'CNAME'].ResourceRecords[0].Value" --output text)
#Delete Route53 record
if [[ "$R53_RECORD" == "${NAME_PREFIX_LOWER}."* ]]; then
echo "Deleting Route53 record ${R53_RECORD}"
CHANGE_STATUS=$(aws route53 change-resource-record-sets --hosted-zone-id "${R53_ZONE_ID}" \
--change-batch '{"Changes": [
{
"Action": "DELETE",
"ResourceRecordSet": {
"Name": "'"${R53_RECORD}"'",
"Type": "CNAME",
"TTL": 300,
"ResourceRecords": [
{
"Value": "'"${RECORD_VALUE}"'"
}
]
}
}
]
}')
STATUS_ID=$(echo "$CHANGE_STATUS" | jq -r '.ChangeInfo.Id')
#Get status from the change
aws route53 wait resource-record-sets-changed --id "$STATUS_ID"
echo "Successfully deleted Route53 record ${R53_RECORD}: status: ${STATUS_ID}"
else
echo "No Route53 record found"
fi


@ -14,6 +14,8 @@ import (
"golang.org/x/crypto/ssh"
)
var config *ssh.ClientConfig
type Node struct {
Name string
Status string
@ -33,11 +35,6 @@ type Pod struct {
Node string
}
var config *ssh.ClientConfig
var SSHKEY string
var SSHUSER string
var err error
func GetBasepath() string {
_, b, _, _ := runtime.Caller(0)
return filepath.Join(filepath.Dir(b), "../..")
@ -91,10 +88,6 @@ func runsshCommand(cmd string, conn *ssh.Client) (string, error) {
return fmt.Sprintf("%s", stdoutBuf.String()), err
}
// nodeOs: ubuntu centos7 centos8 sles15
// clusterType arm, etcd externaldb, if external_db var is not "" picks database from the vars file,
// resourceName: name to resource created timestamp attached
// RunCmdOnNode executes a command from within the given node
func RunCmdOnNode(cmd string, ServerIP string, SSHUser string, SSHKey string) (string, error) {
Server := ServerIP + ":22"
@ -122,6 +115,7 @@ func CountOfStringInSlice(str string, pods []Pod) int {
return count
}
// DeployWorkload deploys the workloads on the cluster from resource manifest files
func DeployWorkload(workload, kubeconfig string, arch string) (string, error) {
resourceDir := GetBasepath() + "/tests/terraform/amd64_resource_files"
if arch == "arm64" {
@ -172,6 +166,8 @@ func FetchIngressIP(kubeconfig string) ([]string, error) {
return ingressIPs, nil
}
// ParseNodes parses the nodes from the kubectl get nodes command
// and returns a list of nodes
func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
nodes := make([]Node, 0, 10)
nodeList := ""
@ -204,6 +200,8 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
return nodes, nil
}
// ParsePods parses the pods from the kubectl get pods command
// and returns a list of pods
func ParsePods(kubeconfig string, print bool) ([]Pod, error) {
pods := make([]Pod, 0, 10)
podList := ""
@ -232,3 +230,15 @@ func ParsePods(kubeconfig string, print bool) ([]Pod, error) {
}
return pods, nil
}
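// PrintFileContents prints the contents of each given file to stdout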
func PrintFileContents(f ...string) error {
for _, file := range f {
content, err := os.ReadFile(file)
if err != nil {
return err
}
fmt.Println(string(content) + "\n")
}
return nil
}


@ -7,29 +7,18 @@ import (
"testing"
tf "github.com/k3s-io/k3s/tests/terraform"
cc "github.com/k3s-io/k3s/tests/terraform/createcluster"
"github.com/k3s-io/k3s/tests/terraform/createcluster"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var upgradeVersion = flag.String("upgrade_version", "", "a string")
var awsAmi = flag.String("aws_ami", "", "a valid ami string like ami-abcxyz123")
var nodeOs = flag.String("node_os", "ubuntu", "a string")
var externalDb = flag.String("external_db", "mysql", "a string")
var arch = flag.String("arch", "amd64", "a string")
var clusterType = flag.String("cluster_type", "etcd", "a string")
var resourceName = flag.String("resource_name", "etcd", "a string")
var sshuser = flag.String("sshuser", "ubuntu", "a string")
var sshkey = flag.String("sshkey", "", "a string")
var accessKey = flag.String("access_key", "", "local path to the private sshkey")
var serverNodes = flag.Int("no_of_server_nodes", 2, "count of server nodes")
var workerNodes = flag.Int("no_of_worker_nodes", 1, "count of worker nodes")
var tfVars = flag.String("tfvars", "/tests/terraform/modules/k3scluster/config/local.tfvars", "custom .tfvars file from base project path")
var destroy = flag.Bool("destroy", false, "a bool")
var failed = false
var terraformOptions map[string]interface{}
var (
arch = flag.String("arch", "amd64", "a string")
destroy = flag.Bool("destroy", false, "a bool")
failed = false
upgradeVersion = flag.String("upgradeVersion", "", "Version to upgrade the cluster to")
)
func Test_TFClusterUpgradeValidation(t *testing.T) {
RegisterFailHandler(Fail)
@ -37,56 +26,76 @@ func Test_TFClusterUpgradeValidation(t *testing.T) {
RunSpecs(t, "Upgrade Cluster Test Suite")
}
var _ = BeforeSuite(func() {
terraformOptions = cc.ClusterOptions(cc.NodeOs(*nodeOs), cc.AwsAmi(*awsAmi), cc.ClusterType(*clusterType), cc.ExternalDb(*externalDb), cc.ResourceName(*resourceName), cc.AccessKey(*accessKey), cc.Sshuser(*sshuser), cc.ServerNodes(*serverNodes), cc.WorkerNodes(*workerNodes), cc.Sshkey(*sshkey))
})
var _ = Describe("Test:", func() {
Context("Build Cluster:", func() {
It("Starts up with no issues", func() {
status, err := cc.BuildCluster(&testing.T{}, *tfVars, false, terraformOptions)
_, err := createcluster.BuildCluster(&testing.T{}, false)
Expect(err).NotTo(HaveOccurred())
Expect(status).To(Equal("cluster created"))
defer GinkgoRecover()
if strings.Contains(*clusterType, "etcd") {
fmt.Println("\nCLUSTER CONFIG:\nOS", *nodeOs, "\nBACKEND", *clusterType)
if strings.Contains(createcluster.ClusterType, "etcd") {
fmt.Println("Backend:", createcluster.ClusterType)
} else {
fmt.Println("\nCLUSTER CONFIG:\nOS", *nodeOs, "\nBACKEND", *externalDb)
fmt.Println("Backend:", createcluster.ExternalDb)
}
Expect(createcluster.KubeConfigFile).ShouldNot(BeEmpty())
if createcluster.ExternalDb != "" && createcluster.ClusterType == "" {
// createcluster.MasterIPs is expected to be a comma-separated list of server IPs
for _, masterIP := range strings.Split(createcluster.MasterIPs, ",") {
cmd := "grep \"datastore-endpoint\" /etc/systemd/system/k3s.service"
res, err := tf.RunCmdOnNode(cmd, strings.TrimSpace(masterIP), createcluster.AwsUser, createcluster.AccessKey)
Expect(err).NotTo(HaveOccurred())
Expect(res).Should(ContainSubstring(createcluster.RenderedTemplate))
}
}
tf.PrintFileContents(createcluster.KubeConfigFile)
Expect(createcluster.KubeConfigFile).ShouldNot(BeEmpty())
Expect(createcluster.MasterIPs).ShouldNot(BeEmpty())
fmt.Println("Server Node IPS:", createcluster.MasterIPs)
fmt.Println("Agent Node IPS:", createcluster.WorkerIPs)
if createcluster.NumWorkers > 0 {
Expect(createcluster.WorkerIPs).ShouldNot(BeEmpty())
} else {
Expect(createcluster.WorkerIPs).Should(BeEmpty())
}
fmt.Printf("\nIPs:\n")
fmt.Println("Server Node IPS:", cc.MasterIPs)
fmt.Println("Agent Node IPS:", cc.WorkerIPs)
fmt.Println(cc.KubeConfigFile)
Expect(cc.KubeConfigFile).Should(ContainSubstring(*resourceName))
})
It("Checks Node and Pod Status", func() {
defer func() {
_, err := tf.ParseNodes(cc.KubeConfigFile, true)
_, err := tf.ParseNodes(createcluster.KubeConfigFile, true)
if err != nil {
fmt.Println("Error retrieving nodes: ", err)
}
_, err = tf.ParsePods(cc.KubeConfigFile, true)
_, err = tf.ParsePods(createcluster.KubeConfigFile, true)
if err != nil {
fmt.Println("Error retrieving pods: ", err)
}
}()
expectedNodeCount := createcluster.NumServers + createcluster.NumWorkers
fmt.Printf("\nFetching node status\n")
expectedNodeCount := *serverNodes + *workerNodes + 1
Eventually(func(g Gomega) {
nodes, err := tf.ParseNodes(cc.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec")
nodes, err := tf.ParseNodes(createcluster.KubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())
Expect(len(nodes)).To(Equal(expectedNodeCount),
"Number of nodes should match the spec")
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"), "Nodes should all be in Ready state")
g.Expect(node.Status).Should(Equal("Ready"),
"Nodes should all be in Ready state")
}
}, "420s", "5s").Should(Succeed())
}, "600s", "5s").Should(Succeed())
fmt.Printf("\nFetching pod status\n")
Eventually(func(g Gomega) {
pods, err := tf.ParsePods(cc.KubeConfigFile, false)
pods, err := tf.ParsePods(createcluster.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
@ -99,22 +108,26 @@ var _ = Describe("Test:", func() {
})
It("Verifies ClusterIP Service", func() {
_, err := tf.DeployWorkload("clusterip.yaml", cc.KubeConfigFile, *arch)
_, err := tf.DeployWorkload("clusterip.yaml", createcluster.KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "Cluster IP manifest not deployed")
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip" +
" --field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should((ContainSubstring("test-clusterip")))
g.Expect(res).Should(ContainSubstring("test-clusterip"))
}, "420s", "5s").Should(Succeed())
clusterip, _ := tf.FetchClusterIP(cc.KubeConfigFile, "nginx-clusterip-svc")
clusterip, _ := tf.FetchClusterIP(createcluster.KubeConfigFile, "nginx-clusterip-svc")
cmd := "curl -sL --insecure http://" + clusterip + "/name.html"
nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
nodeExternalIP := tf.FetchNodeExternalIP(createcluster.KubeConfigFile)
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
res, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
res, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-clusterip"))
}, "420s", "10s").Should(Succeed())
@ -122,17 +135,22 @@ var _ = Describe("Test:", func() {
})
It("Verifies NodePort Service", func() {
_, err := tf.DeployWorkload("nodeport.yaml", cc.KubeConfigFile, *arch)
_, err := tf.DeployWorkload("nodeport.yaml", createcluster.KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "NodePort manifest not deployed")
nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + cc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeExternalIP := tf.FetchNodeExternalIP(createcluster.KubeConfigFile)
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + createcluster.KubeConfigFile +
" --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport " +
"--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-nodeport"))
}, "240s", "5s").Should(Succeed())
@ -147,17 +165,23 @@ var _ = Describe("Test:", func() {
})
It("Verifies LoadBalancer Service", func() {
_, err := tf.DeployWorkload("loadbalancer.yaml", cc.KubeConfigFile, *arch)
_, err := tf.DeployWorkload("loadbalancer.yaml", createcluster.KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "Loadbalancer manifest not deployed")
nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + cc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
nodeExternalIP := tf.FetchNodeExternalIP(createcluster.KubeConfigFile)
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + createcluster.KubeConfigFile +
" --output jsonpath=\"{.spec.ports[0].port}\""
port, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer " +
"--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
@ -165,6 +189,7 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
cmd = "curl -sL --insecure http://" + ip + ":" + port + "/name.html"
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
@ -172,21 +197,24 @@ var _ = Describe("Test:", func() {
})
It("Verifies Ingress", func() {
_, err := tf.DeployWorkload("ingress.yaml", cc.KubeConfigFile, *arch)
_, err := tf.DeployWorkload("ingress.yaml", createcluster.KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "Ingress manifest not deployed")
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress " +
"--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-ingress"))
}, "240s", "5s").Should(Succeed())
ingressIps, err := tf.FetchIngressIP(cc.KubeConfigFile)
ingressIps, err := tf.FetchIngressIP(createcluster.KubeConfigFile)
Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned")
for _, ip := range ingressIps {
cmd := "curl -s --header host:foo1.bar.com" + " http://" + ip + "/name.html"
Eventually(func(g Gomega) {
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
@ -196,120 +224,129 @@ var _ = Describe("Test:", func() {
})
It("Verifies Daemonset", func() {
_, err := tf.DeployWorkload("daemonset.yaml", cc.KubeConfigFile, *arch)
_, err := tf.DeployWorkload("daemonset.yaml", createcluster.KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
nodes, _ := tf.ParseNodes(cc.KubeConfigFile, false)
pods, _ := tf.ParsePods(cc.KubeConfigFile, false)
nodes, _ := tf.ParseNodes(createcluster.KubeConfigFile, false)
pods, _ := tf.ParsePods(createcluster.KubeConfigFile, false)
Eventually(func(g Gomega) {
count := tf.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
g.Expect(len(nodes)).Should(Equal(count),
"Daemonset pod count does not match node count")
}, "420s", "10s").Should(Succeed())
})
It("Verifies Local Path Provisioner storage ", func() {
_, err := tf.DeployWorkload("local-path-provisioner.yaml", cc.KubeConfigFile, *arch)
_, err := tf.DeployWorkload("local-path-provisioner.yaml", createcluster.KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
Eventually(func(g Gomega) {
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pvc local-path-pvc --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("local-path-pvc"))
g.Expect(res).Should(ContainSubstring("Bound"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pod volume-test --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
cmd := "kubectl --kubeconfig=" + cc.KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
cmd := "kubectl --kubeconfig=" + createcluster.KubeConfigFile + " exec volume-test -- sh -c 'echo local-path-test > /data/test'"
_, err = tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
cmd = "kubectl delete pod volume-test --kubeconfig=" + cc.KubeConfigFile
cmd = "kubectl delete pod volume-test --kubeconfig=" + createcluster.KubeConfigFile
_, err = tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
_, err = tf.DeployWorkload("local-path-provisioner.yaml", cc.KubeConfigFile, *arch)
_, err = tf.DeployWorkload("local-path-provisioner.yaml", createcluster.KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed")
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l app=local-path-provisioner " +
"--field-selector=status.phase=Running -n kube-system --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("pod/local-path-provisioner"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pod volume-test --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd = "kubectl exec volume-test --kubeconfig=" + cc.KubeConfigFile + " -- cat /data/test"
cmd = "kubectl exec volume-test --kubeconfig=" + createcluster.KubeConfigFile + " -- cat /data/test"
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("local-path-test"))
}, "180s", "2s").Should(Succeed())
})
It("Verifies dns access", func() {
_, err := tf.DeployWorkload("dnsutils.yaml", cc.KubeConfigFile, *arch)
_, err := tf.DeployWorkload("dnsutils.yaml", createcluster.KubeConfigFile, *arch)
Expect(err).NotTo(HaveOccurred(), "dnsutils manifest not deployed")
Eventually(func(g Gomega) {
cmd := "kubectl get pods dnsutils --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods dnsutils --kubeconfig=" + createcluster.KubeConfigFile
res, _ := tf.RunCommand(cmd)
g.Expect(res).Should(ContainSubstring("dnsutils"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl --kubeconfig=" + cc.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
cmd := "kubectl --kubeconfig=" + createcluster.KubeConfigFile +
" exec -t dnsutils -- nslookup kubernetes.default"
res, _ := tf.RunCommand(cmd)
g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
}, "420s", "2s").Should(Succeed())
})
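These specs lean on Gomega's polling form of Eventually: the closure receives its own Gomega instance, and every assertion inside it must go through that parameter so a failed check is retried until the timeout instead of aborting the spec immediately. A minimal, self-contained sketch of the same pattern, assuming a placeholder kubectl command and durations that are not taken from this suite:

package example_test

import (
	"os/exec"
	"testing"
	"time"

	. "github.com/onsi/gomega"
)

func TestPollingPattern(t *testing.T) {
	g := NewWithT(t)

	g.Eventually(func(gm Gomega) {
		// Placeholder for tf.RunCommand(cmd); any command whose output is polled works here.
		out, err := exec.Command("kubectl", "get", "pods", "dnsutils").CombinedOutput()
		gm.Expect(err).NotTo(HaveOccurred())
		gm.Expect(string(out)).To(ContainSubstring("Running"))
	}, 2*time.Minute, 5*time.Second).Should(Succeed())
}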
It("Verify Cluster is upgraded", func() {
MIPs := strings.Split(cc.MasterIPs, ",")
MIPs := strings.Split(createcluster.MasterIPs, ",")
for _, ip := range MIPs {
cmd := "sudo sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/master_cmd"
Eventually(func(g Gomega) {
_, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
_, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey)
g.Expect(err).NotTo(HaveOccurred())
}, "420s", "2s").Should(Succeed())
cmd = "sudo chmod u+x /tmp/master_cmd && sudo /tmp/master_cmd"
fmt.Println(cmd)
Eventually(func(g Gomega) {
_, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
_, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey)
g.Expect(err).NotTo(HaveOccurred())
}, "420s", "2s").Should(Succeed())
}
WIPs := strings.Split(cc.WorkerIPs, ",")
WIPs := strings.Split(createcluster.WorkerIPs, ",")
for _, ip := range WIPs {
// for i := 0; i < len(WIPs) && len(WIPs[0]) > 1; i++ {
// ip := WIPs[i]
// strings.TrimSpace(WIPs[i])
cmd := "sudo sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/agent_cmd"
Eventually(func(g Gomega) {
_, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
_, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey)
g.Expect(err).NotTo(HaveOccurred())
}, "420s", "2s").Should(Succeed())
cmd = "sudo chmod u+x /tmp/agent_cmd && sudo /tmp/agent_cmd"
Eventually(func(g Gomega) {
_, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
_, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey)
g.Expect(err).NotTo(HaveOccurred())
}, "420s", "2s").Should(Succeed())
}
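The upgrade itself works by rewriting the install command that was saved on each node during provisioning: the sed call injects an INSTALL_K3S_VERSION assignment right after the pipe, then the script is re-run. A rough Go illustration of that substitution, assuming a typical shape for /tmp/master_cmd and an example version string, neither of which appears in this diff:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed shape of the saved install line; the real /tmp/master_cmd is written during provisioning.
	saved := `curl -sfL https://get.k3s.io | sh -s - server --cluster-init`

	// Same effect as: sudo sed -i "s/|/| INSTALL_K3S_VERSION=v1.26.4+k3s1/g" /tmp/master_cmd
	upgraded := strings.ReplaceAll(saved, "|", "| INSTALL_K3S_VERSION=v1.26.4+k3s1")

	fmt.Println(upgraded)
	// curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.26.4+k3s1 sh -s - server --cluster-init
}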
@ -317,32 +354,38 @@ var _ = Describe("Test:", func() {
It("Checks Node and Pod Status after upgrade", func() {
defer func() {
_, err := tf.ParseNodes(cc.KubeConfigFile, true)
_, err := tf.ParseNodes(createcluster.KubeConfigFile, true)
if err != nil {
fmt.Println("Error retrieving nodes: ", err)
}
_, err = tf.ParsePods(cc.KubeConfigFile, true)
_, err = tf.ParsePods(createcluster.KubeConfigFile, true)
if err != nil {
fmt.Println("Error retrieving pods: ", err)
}
}()
fmt.Printf("\nFetching node status\n")
expectedNodeCount := *serverNodes + *workerNodes + 1
Eventually(func(g Gomega) {
nodes, err := tf.ParseNodes(cc.KubeConfigFile, false)
expectedNodeCount := createcluster.NumServers + createcluster.NumWorkers
nodes, err := tf.ParseNodes(createcluster.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(len(nodes)).To(Equal(expectedNodeCount), "Number of nodes should match the spec")
g.Expect(len(nodes)).To(Equal(expectedNodeCount),
"Number of nodes should match the spec")
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"), "Nodes should all be in Ready state")
g.Expect(node.Status).Should(Equal("Ready"),
"Nodes should all be in Ready state")
g.Expect(node.Version).Should(ContainSubstring(*upgradeVersion))
}
}, "420s", "5s").Should(Succeed())
fmt.Printf("\nFetching pod status\n")
Eventually(func(g Gomega) {
pods, err := tf.ParsePods(cc.KubeConfigFile, false)
pods, err := tf.ParsePods(createcluster.KubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
@ -355,18 +398,22 @@ var _ = Describe("Test:", func() {
It("Verifies ClusterIP Service after upgrade", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-clusterip " +
"--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should((ContainSubstring("test-clusterip")))
g.Expect(res).Should(ContainSubstring("test-clusterip"))
}, "420s", "5s").Should(Succeed())
clusterip, _ := tf.FetchClusterIP(cc.KubeConfigFile, "nginx-clusterip-svc")
clusterip, _ := tf.FetchClusterIP(createcluster.KubeConfigFile, "nginx-clusterip-svc")
cmd := "curl -sL --insecure http://" + clusterip + "/name.html"
nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
nodeExternalIP := tf.FetchNodeExternalIP(createcluster.KubeConfigFile)
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
res, err := tf.RunCmdOnNode(cmd, ip, *sshuser, *accessKey)
res, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-clusterip"))
@ -375,15 +422,19 @@ var _ = Describe("Test:", func() {
})
It("Verifies NodePort Service after upgrade", func() {
nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + cc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeExternalIP := tf.FetchNodeExternalIP(createcluster.KubeConfigFile)
cmd := "kubectl get service nginx-nodeport-svc --kubeconfig=" + createcluster.KubeConfigFile +
" --output jsonpath=\"{.spec.ports[0].nodePort}\""
nodeport, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-nodeport " +
"--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-nodeport"))
}, "240s", "5s").Should(Succeed())
@ -391,6 +442,7 @@ var _ = Describe("Test:", func() {
cmd = "curl -L --insecure http://" + ip + ":" + nodeport + "/name.html"
Eventually(func(g Gomega) {
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-nodeport"))
}, "240s", "5s").Should(Succeed())
@ -398,15 +450,19 @@ var _ = Describe("Test:", func() {
})
It("Verifies LoadBalancer Service after upgrade", func() {
nodeExternalIP := tf.FetchNodeExternalIP(cc.KubeConfigFile)
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" + cc.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
nodeExternalIP := tf.FetchNodeExternalIP(createcluster.KubeConfigFile)
cmd := "kubectl get service nginx-loadbalancer-svc --kubeconfig=" +
createcluster.KubeConfigFile + " --output jsonpath=\"{.spec.ports[0].port}\""
port, err := tf.RunCommand(cmd)
Expect(err).NotTo(HaveOccurred())
for _, ip := range nodeExternalIP {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-loadbalancer " +
"--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
@ -414,6 +470,7 @@ var _ = Describe("Test:", func() {
Eventually(func(g Gomega) {
cmd = "curl -sL --insecure http://" + ip + ":" + port + "/name.html"
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-loadbalancer"))
}, "240s", "5s").Should(Succeed())
@ -422,13 +479,15 @@ var _ = Describe("Test:", func() {
It("Verifies Ingress after upgrade", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress --field-selector=status.phase=Running --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l k8s-app=nginx-app-ingress " +
"--field-selector=status.phase=Running --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("test-ingress"))
}, "240s", "5s").Should(Succeed())
ingressIps, err := tf.FetchIngressIP(cc.KubeConfigFile)
ingressIps, err := tf.FetchIngressIP(createcluster.KubeConfigFile)
Expect(err).NotTo(HaveOccurred(), "Ingress ip is not returned")
for _, ip := range ingressIps {
@ -442,12 +501,13 @@ var _ = Describe("Test:", func() {
})
It("Verifies Daemonset after upgrade", func() {
nodes, _ := tf.ParseNodes(cc.KubeConfigFile, false)
pods, _ := tf.ParsePods(cc.KubeConfigFile, false)
nodes, _ := tf.ParseNodes(createcluster.KubeConfigFile, false)
pods, _ := tf.ParsePods(createcluster.KubeConfigFile, false)
Eventually(func(g Gomega) {
count := tf.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
g.Expect(len(nodes)).Should(Equal(count),
"Daemonset pod count does not match node count")
}, "420s", "10s").Should(Succeed())
Eventually(func(g Gomega) {
@ -461,23 +521,27 @@ var _ = Describe("Test:", func() {
It("Validating Local Path Provisioner storage after upgrade", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods -o=name -l app=local-path-provisioner --field-selector=status.phase=Running -n kube-system --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods -o=name -l app=local-path-provisioner" +
" --field-selector=status.phase=Running -n kube-system --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("pod/local-path-provisioner"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl get pod volume-test --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pod volume-test --kubeconfig=" + createcluster.KubeConfigFile
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("volume-test"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl exec volume-test --kubeconfig=" + cc.KubeConfigFile + " -- cat /data/test"
cmd := "kubectl exec volume-test --kubeconfig=" + createcluster.KubeConfigFile + " -- cat /data/test"
res, err := tf.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(res).Should(ContainSubstring("local-path-test"))
}, "180s", "2s").Should(Succeed())
@ -485,25 +549,24 @@ var _ = Describe("Test:", func() {
It("Verifies dns access after upgrade", func() {
Eventually(func(g Gomega) {
cmd := "kubectl get pods dnsutils --kubeconfig=" + cc.KubeConfigFile
cmd := "kubectl get pods dnsutils --kubeconfig=" + createcluster.KubeConfigFile
res, _ := tf.RunCommand(cmd)
g.Expect(res).Should(ContainSubstring("dnsutils"))
g.Expect(res).Should(ContainSubstring("Running"))
}, "420s", "2s").Should(Succeed())
Eventually(func(g Gomega) {
cmd := "kubectl --kubeconfig=" + cc.KubeConfigFile + " exec -t dnsutils -- nslookup kubernetes.default"
cmd := "kubectl --kubeconfig=" + createcluster.KubeConfigFile +
" exec -t dnsutils -- nslookup kubernetes.default"
res, _ := tf.RunCommand(cmd)
g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
}, "420s", "2s").Should(Succeed())
})
})
})
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = BeforeEach(func() {
failed = failed || CurrentSpecReport().Failed()
if *destroy {
@ -511,14 +574,18 @@ var _ = BeforeEach(func() {
}
})
var _ = AfterSuite(func() {
if failed {
fmt.Println("FAILED!")
var _ = AfterEach(func() {
if CurrentSpecReport().Failed() {
fmt.Printf("\nFAILED! %s\n", CurrentSpecReport().FullText())
} else {
fmt.Println("PASSED!")
fmt.Printf("\nPASSED! %s\n", CurrentSpecReport().FullText())
}
})
var _ = AfterSuite(func() {
if *destroy {
status, err := cc.BuildCluster(&testing.T{}, *tfVars, *destroy, terraformOptions)
status, err := createcluster.BuildCluster(&testing.T{}, *destroy)
Expect(err).NotTo(HaveOccurred())
Expect(status).To(Equal("cluster destroyed"))
}
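For context, the *destroy, *upgradeVersion and *arch values dereferenced throughout these specs are package-level command-line flags. A hypothetical sketch of how they could be declared; the package name, flag names, and defaults are inferred from the dereferences above, and the real definitions live elsewhere in the test package and may differ:

package createcluster_test

import "flag"

var (
	// Assumed flag declarations, for illustration only.
	destroy        = flag.Bool("destroy", false, "destroy the cluster instead of running the specs")
	upgradeVersion = flag.String("upgradeVersion", "", "k3s version to upgrade the cluster to")
	arch           = flag.String("arch", "amd64", "cluster architecture, e.g. amd64 or arm64")
)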