2021-07-28 16:28:26 +00:00
|
|
|
package e2e
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2022-02-08 17:34:57 +00:00
|
|
|
"io/ioutil"
|
2021-12-22 17:51:48 +00:00
|
|
|
"os"
|
2021-07-28 16:28:26 +00:00
|
|
|
"os/exec"
|
2022-02-08 17:34:57 +00:00
|
|
|
"path/filepath"
|
2021-12-22 17:51:48 +00:00
|
|
|
"strconv"
|
2021-07-28 16:28:26 +00:00
|
|
|
"strings"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Node holds one record of `kubectl get nodes --no-headers -o wide` output,
// as populated by ParseNodes.
type Node struct {
	Name       string // node name (first column)
	Status     string // node status, e.g. "Ready" (second column)
	Roles      string // comma-separated role list (third column)
	InternalIP string // internal IP (sixth column of wide output)
	ExternalIP string // external IP (seventh column); left empty when kubectl reports none
}
|
|
|
|
|
|
|
|
// Pod holds one record of `kubectl get pods -o wide --no-headers -A` output,
// as populated by ParsePods. All values are kept as raw strings from kubectl.
type Pod struct {
	NameSpace string // pod namespace (first column)
	Name      string // pod name (second column)
	Ready     string // ready count, e.g. "1/1" (third column)
	Status    string // pod phase, e.g. "Running" (fourth column)
	Restarts  string // restart count as printed by kubectl (fifth column)
	NodeIP    string // IP of the hosting node (seventh column of wide output)
	Node      string // name of the hosting node (eighth column)
}
|
|
|
|
|
2022-02-08 17:34:57 +00:00
|
|
|
func CountOfStringInSlice(str string, pods []Pod) int {
|
|
|
|
count := 0
|
|
|
|
for _, pod := range pods {
|
|
|
|
if strings.Contains(pod.Name, str) {
|
|
|
|
count++
|
|
|
|
}
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
2022-02-08 17:34:57 +00:00
|
|
|
return count
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
|
|
|
|
2022-08-15 22:00:22 +00:00
|
|
|
// genNodeEnvs generates the node and testing environment variables for vagrant up
func genNodeEnvs(nodeOS string, serverCount, agentCount int) ([]string, []string, string) {
	// Name nodes "server-0..N" and "agent-0..N".
	serverNodeNames := make([]string, serverCount)
	for i := range serverNodeNames {
		serverNodeNames[i] = "server-" + strconv.Itoa(i)
	}
	agentNodeNames := make([]string, agentCount)
	for i := range agentNodeNames {
		agentNodeNames[i] = "agent-" + strconv.Itoa(i)
	}

	// Space-separated role list; TrimSpace handles the zero-agent/zero-server cases.
	nodeRoles := strings.TrimSpace(strings.Join(serverNodeNames, " ") + " " + strings.Join(agentNodeNames, " "))

	// Every node uses the same OS box, one entry per node.
	nodeBoxes := strings.TrimSpace(strings.Repeat(nodeOS+" ", serverCount+agentCount))

	nodeEnvs := fmt.Sprintf(`E2E_NODE_ROLES="%s" E2E_NODE_BOXES="%s"`, nodeRoles, nodeBoxes)

	return serverNodeNames, agentNodeNames, nodeEnvs
}
|
|
|
|
|
|
|
|
func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
|
|
|
|
|
|
|
|
serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
|
|
|
|
|
2022-03-02 20:42:55 +00:00
|
|
|
var testOptions string
|
|
|
|
for _, env := range os.Environ() {
|
|
|
|
if strings.HasPrefix(env, "E2E_") {
|
|
|
|
testOptions += " " + env
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-15 22:00:22 +00:00
|
|
|
cmd := fmt.Sprintf(`%s %s vagrant up &> vagrant.log`, nodeEnvs, testOptions)
|
2022-02-08 17:34:57 +00:00
|
|
|
fmt.Println(cmd)
|
|
|
|
if _, err := RunCommand(cmd); err != nil {
|
2022-08-15 22:00:22 +00:00
|
|
|
return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
|
|
|
|
}
|
|
|
|
return serverNodeNames, agentNodeNames, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateLocalCluster creates a cluster using the locally built k3s binary. The vagrant-scp plugin must be installed for
|
|
|
|
// this function to work. The binary is deployed as an airgapped install of k3s on the VMs.
|
2022-08-18 16:21:56 +00:00
|
|
|
// This is intended only for local testing purposes when writing a new E2E test.
|
2022-08-15 22:00:22 +00:00
|
|
|
func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, []string, error) {
|
|
|
|
|
|
|
|
serverNodeNames, agentNodeNames, nodeEnvs := genNodeEnvs(nodeOS, serverCount, agentCount)
|
|
|
|
|
|
|
|
var testOptions string
|
|
|
|
for _, env := range os.Environ() {
|
|
|
|
if strings.HasPrefix(env, "E2E_") {
|
|
|
|
testOptions += " " + env
|
|
|
|
}
|
|
|
|
}
|
|
|
|
testOptions += " E2E_RELEASE_VERSION=skip"
|
|
|
|
|
|
|
|
cmd := fmt.Sprintf(`%s vagrant up --no-provision &> vagrant.log`, nodeEnvs)
|
|
|
|
if _, err := RunCommand(cmd); err != nil {
|
|
|
|
return nil, nil, fmt.Errorf("failed creating nodes: %s: %v", cmd, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeRoles := append(serverNodeNames, agentNodeNames...)
|
|
|
|
|
|
|
|
for _, node := range nodeRoles {
|
|
|
|
cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
|
|
|
|
if _, err := RunCommand(cmd); err != nil {
|
|
|
|
return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
|
|
|
|
}
|
|
|
|
if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
cmd = fmt.Sprintf(`%s %s vagrant provision &>> vagrant.log`, nodeEnvs, testOptions)
|
|
|
|
if _, err := RunCommand(cmd); err != nil {
|
|
|
|
return nil, nil, fmt.Errorf("failed creating cluster: %s: %v", cmd, err)
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
2022-08-15 22:00:22 +00:00
|
|
|
|
2022-03-02 20:42:55 +00:00
|
|
|
return serverNodeNames, agentNodeNames, nil
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
|
|
|
|
2022-08-05 16:16:10 +00:00
|
|
|
func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) {
|
2022-03-15 17:29:56 +00:00
|
|
|
resourceDir := "../amd64_resource_files"
|
2022-08-05 16:16:10 +00:00
|
|
|
if hardened {
|
|
|
|
resourceDir = "../cis_amd64_resource_files"
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
2022-03-15 17:29:56 +00:00
|
|
|
files, err := ioutil.ReadDir(resourceDir)
|
2021-12-22 17:51:48 +00:00
|
|
|
if err != nil {
|
2022-02-08 17:34:57 +00:00
|
|
|
err = fmt.Errorf("%s : Unable to read resource manifest file for %s", err, workload)
|
2021-12-22 17:51:48 +00:00
|
|
|
return "", err
|
|
|
|
}
|
2022-02-08 17:34:57 +00:00
|
|
|
fmt.Println("\nDeploying", workload)
|
|
|
|
for _, f := range files {
|
2022-03-15 17:29:56 +00:00
|
|
|
filename := filepath.Join(resourceDir, f.Name())
|
2022-02-08 17:34:57 +00:00
|
|
|
if strings.TrimSpace(f.Name()) == workload {
|
|
|
|
cmd := "kubectl apply -f " + filename + " --kubeconfig=" + kubeconfig
|
|
|
|
return RunCommand(cmd)
|
|
|
|
}
|
2021-12-22 17:51:48 +00:00
|
|
|
}
|
2022-02-08 17:34:57 +00:00
|
|
|
return "", nil
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
|
|
|
|
2022-02-08 17:34:57 +00:00
|
|
|
func DestroyCluster() error {
|
|
|
|
if _, err := RunCommand("vagrant destroy -f"); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return os.Remove("vagrant.log")
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
|
|
|
|
2022-06-14 15:40:29 +00:00
|
|
|
func FetchClusterIP(kubeconfig string, servicename string, dualStack bool) (string, error) {
|
|
|
|
if dualStack {
|
|
|
|
cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIPs}' --kubeconfig=" + kubeconfig
|
|
|
|
res, err := RunCommand(cmd)
|
|
|
|
if err != nil {
|
|
|
|
return res, err
|
|
|
|
}
|
|
|
|
res = strings.ReplaceAll(res, "\"", "")
|
|
|
|
return strings.Trim(res, "[]"), nil
|
|
|
|
}
|
2021-07-28 16:28:26 +00:00
|
|
|
cmd := "kubectl get svc " + servicename + " -o jsonpath='{.spec.clusterIP}' --kubeconfig=" + kubeconfig
|
2021-12-22 17:51:48 +00:00
|
|
|
return RunCommand(cmd)
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
|
|
|
|
2022-02-08 17:34:57 +00:00
|
|
|
func FetchIngressIP(kubeconfig string) ([]string, error) {
|
|
|
|
cmd := "kubectl get ing ingress -o jsonpath='{.status.loadBalancer.ingress[*].ip}' --kubeconfig=" + kubeconfig
|
|
|
|
res, err := RunCommand(cmd)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
ingressIP := strings.Trim(res, " ")
|
|
|
|
ingressIPs := strings.Split(ingressIP, " ")
|
|
|
|
return ingressIPs, nil
|
|
|
|
}
|
|
|
|
|
2021-12-22 17:51:48 +00:00
|
|
|
// FetchNodeExternalIP returns the IPv4 address of the eth1 interface on the
// named vagrant node, as reported by `ip addr` inside the guest.
func FetchNodeExternalIP(nodename string) (string, error) {
	// NOTE(review): $2 sits inside a double-quoted string, so the local shell
	// expands it to "" before vagrant ever sees the command. awk therefore
	// prints the whole matching line ("    inet <ip> ..."), not field 2, and
	// cut then strips the "/prefix" suffix. The "inet" split below depends on
	// this quirk — confirm before "fixing" the quoting.
	cmd := "vagrant ssh " + nodename + " -c \"ip -f inet addr show eth1| awk '/inet / {print $2}'|cut -d/ -f1\""
	ipaddr, err := RunCommand(cmd)
	if err != nil {
		return "", err
	}
	// Trim with an empty cutset is a no-op; left unchanged in this doc-only pass.
	ips := strings.Trim(ipaddr, "")
	// Take the text after the "inet" token and trim whitespace to get the bare IP.
	// NOTE(review): ip[1] panics if the output contains no "inet" token (e.g.
	// eth1 absent); assumed unreachable for provisioned nodes — verify.
	ip := strings.Split(ips, "inet")
	nodeip := strings.TrimSpace(ip[1])
	return nodeip, nil
}
|
2022-02-08 17:34:57 +00:00
|
|
|
|
|
|
|
func GenKubeConfigFile(serverName string) (string, error) {
|
|
|
|
cmd := fmt.Sprintf("vagrant ssh %s -c \"sudo cat /etc/rancher/k3s/k3s.yaml\"", serverName)
|
|
|
|
kubeConfig, err := RunCommand(cmd)
|
2021-12-22 17:51:48 +00:00
|
|
|
if err != nil {
|
2022-02-08 17:34:57 +00:00
|
|
|
return "", err
|
2021-12-22 17:51:48 +00:00
|
|
|
}
|
2022-02-08 17:34:57 +00:00
|
|
|
nodeIP, err := FetchNodeExternalIP(serverName)
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
kubeConfig = strings.Replace(kubeConfig, "127.0.0.1", nodeIP, 1)
|
|
|
|
kubeConfigFile := fmt.Sprintf("kubeconfig-%s", serverName)
|
|
|
|
if err := os.WriteFile(kubeConfigFile, []byte(kubeConfig), 0644); err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
return kubeConfigFile, nil
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
|
|
|
|
2022-03-01 19:28:39 +00:00
|
|
|
func GetVagrantLog() string {
|
|
|
|
log, err := os.Open("vagrant.log")
|
|
|
|
if err != nil {
|
|
|
|
return err.Error()
|
|
|
|
}
|
|
|
|
bytes, err := ioutil.ReadAll(log)
|
|
|
|
if err != nil {
|
|
|
|
return err.Error()
|
|
|
|
}
|
|
|
|
return string(bytes)
|
|
|
|
}
|
|
|
|
|
2022-02-08 17:34:57 +00:00
|
|
|
func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
|
2021-07-28 16:28:26 +00:00
|
|
|
nodes := make([]Node, 0, 10)
|
|
|
|
nodeList := ""
|
2021-12-22 17:51:48 +00:00
|
|
|
|
|
|
|
cmd := "kubectl get nodes --no-headers -o wide -A --kubeconfig=" + kubeConfig
|
|
|
|
res, err := RunCommand(cmd)
|
2022-02-08 17:34:57 +00:00
|
|
|
|
2021-12-22 17:51:48 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
nodeList = strings.TrimSpace(res)
|
|
|
|
split := strings.Split(nodeList, "\n")
|
|
|
|
for _, rec := range split {
|
|
|
|
if strings.TrimSpace(rec) != "" {
|
|
|
|
fields := strings.Fields(rec)
|
|
|
|
node := Node{
|
|
|
|
Name: fields[0],
|
|
|
|
Status: fields[1],
|
|
|
|
Roles: fields[2],
|
|
|
|
InternalIP: fields[5],
|
2022-03-17 23:56:10 +00:00
|
|
|
}
|
|
|
|
if len(fields) > 6 {
|
|
|
|
node.ExternalIP = fields[6]
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
2021-12-22 17:51:48 +00:00
|
|
|
nodes = append(nodes, node)
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
|
|
|
}
|
2022-02-08 17:34:57 +00:00
|
|
|
if print {
|
2021-07-28 16:28:26 +00:00
|
|
|
fmt.Println(nodeList)
|
|
|
|
}
|
2021-12-22 17:51:48 +00:00
|
|
|
return nodes, nil
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
|
|
|
|
2022-02-08 17:34:57 +00:00
|
|
|
func ParsePods(kubeconfig string, print bool) ([]Pod, error) {
|
2021-07-28 16:28:26 +00:00
|
|
|
pods := make([]Pod, 0, 10)
|
|
|
|
podList := ""
|
2021-12-22 17:51:48 +00:00
|
|
|
|
|
|
|
cmd := "kubectl get pods -o wide --no-headers -A --kubeconfig=" + kubeconfig
|
|
|
|
res, _ := RunCommand(cmd)
|
|
|
|
res = strings.TrimSpace(res)
|
|
|
|
podList = res
|
|
|
|
|
|
|
|
split := strings.Split(res, "\n")
|
|
|
|
for _, rec := range split {
|
|
|
|
fields := strings.Fields(string(rec))
|
|
|
|
pod := Pod{
|
|
|
|
NameSpace: fields[0],
|
|
|
|
Name: fields[1],
|
|
|
|
Ready: fields[2],
|
|
|
|
Status: fields[3],
|
|
|
|
Restarts: fields[4],
|
|
|
|
NodeIP: fields[6],
|
|
|
|
Node: fields[7],
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
2021-12-22 17:51:48 +00:00
|
|
|
pods = append(pods, pod)
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
2022-02-08 17:34:57 +00:00
|
|
|
if print {
|
2021-07-28 16:28:26 +00:00
|
|
|
fmt.Println(podList)
|
|
|
|
}
|
2021-12-22 17:51:48 +00:00
|
|
|
return pods, nil
|
2021-07-28 16:28:26 +00:00
|
|
|
}
|
2022-02-08 17:34:57 +00:00
|
|
|
|
2022-03-01 19:28:39 +00:00
|
|
|
// RestartCluster restarts the k3s service on each node given
|
|
|
|
func RestartCluster(nodeNames []string) error {
|
|
|
|
for _, nodeName := range nodeNames {
|
|
|
|
cmd := "sudo systemctl restart k3s"
|
|
|
|
if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-02-08 17:34:57 +00:00
|
|
|
// RunCmdOnNode executes a command from within the given node
|
|
|
|
func RunCmdOnNode(cmd string, nodename string) (string, error) {
|
2022-03-01 19:28:39 +00:00
|
|
|
runcmd := "vagrant ssh -c \"" + cmd + "\" " + nodename
|
2022-08-15 22:00:22 +00:00
|
|
|
out, err := RunCommand(runcmd)
|
|
|
|
if err != nil {
|
|
|
|
return out, fmt.Errorf("failed to run command %s on node %s: %v", cmd, nodename, err)
|
|
|
|
}
|
|
|
|
return out, nil
|
2022-02-08 17:34:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// RunCommand executes a command on the host
func RunCommand(cmd string) (string, error) {
	output, err := exec.Command("bash", "-c", cmd).CombinedOutput()
	return string(output), err
}
|
|
|
|
|
2022-03-02 20:42:55 +00:00
|
|
|
func UpgradeCluster(serverNodeNames []string, agentNodeNames []string) error {
|
|
|
|
for _, nodeName := range serverNodeNames {
|
2022-08-26 18:42:04 +00:00
|
|
|
cmd := "E2E_RELEASE_CHANNEL=commit vagrant provision " + nodeName
|
2022-02-08 17:34:57 +00:00
|
|
|
fmt.Println(cmd)
|
|
|
|
if out, err := RunCommand(cmd); err != nil {
|
|
|
|
fmt.Println("Error Upgrading Cluster", out)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
2022-03-02 20:42:55 +00:00
|
|
|
for _, nodeName := range agentNodeNames {
|
2022-08-26 18:42:04 +00:00
|
|
|
cmd := "E2E_RELEASE_CHANNEL=commit vagrant provision " + nodeName
|
2022-02-08 17:34:57 +00:00
|
|
|
if _, err := RunCommand(cmd); err != nil {
|
|
|
|
fmt.Println("Error Upgrading Cluster", err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|