Add E2E to Drone (#6890)

* Initial drone vagrant pipeline

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Build e2e test image

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Add docker registry to E2E pipeline

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Bump libvirt image

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Add ci flag to secretsencryption

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Fix vagrant log on secretsencryption

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Remove DB parallel tests

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Reduce sonobuoy tests even further

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Add local build

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Add cron conformance pipeline

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Add string output for nodes

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Switch snapshot restore for upgrade cluster

Signed-off-by: Derek Nola <derek.nola@suse.com>

* Fix cp

Signed-off-by: Derek Nola <derek.nola@suse.com>

---------

Signed-off-by: Derek Nola <derek.nola@suse.com>
Derek Nola 2023-03-10 19:53:41 -08:00 committed by GitHub
parent ea094d1d49
commit 522ad1e697
9 changed files with 215 additions and 57 deletions

View File

@@ -128,6 +128,48 @@ volumes:
host:
path: /var/run/docker.sock
---
kind: pipeline
name: conformance
platform:
os: linux
arch: amd64
trigger:
event:
- cron
cron:
- nightly
steps:
- name: build
image: rancher/dapper:v0.5.0
commands:
- dapper ci
- echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
volumes:
- name: docker
path: /var/run/docker.sock
- name: test
image: rancher/dapper:v0.5.0
environment:
ENABLE_REGISTRY: 'true'
commands:
- docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
- >
docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e SONOBUOY_VERSION -e ENABLE_REGISTRY
-v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
volumes:
- name: docker
path: /var/run/docker.sock
volumes:
- name: docker
host:
path: /var/run/docker.sock
---
kind: pipeline
name: arm64
@@ -499,3 +541,65 @@ trigger:
depends_on:
- manifest
---
kind: pipeline
name: e2e
type: docker
platform:
os: linux
arch: amd64
steps:
- name: build-e2e-image
image: rancher/dapper:v0.5.0
commands:
- DOCKER_BUILDKIT=1 docker build --target test-e2e -t test-e2e -f Dockerfile.test .
- SKIP_VALIDATE=true SKIP_AIRGAP=true dapper ci
- cp dist/artifacts/* /tmp/artifacts/
volumes:
- name: cache
path: /tmp/artifacts
- name: docker
path: /var/run/docker.sock
- name: test-e2e
image: test-e2e
pull: never
resources:
cpu: 6000
memory: 10Gi
environment:
E2E_REGISTRY: 'true'
commands:
- mkdir -p dist/artifacts
- cp /tmp/artifacts/* dist/artifacts/
- docker stop registry && docker rm registry
- docker run -d -p 5000:5000 -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io --name registry registry:2
- cd tests/e2e/validatecluster
- vagrant destroy -f
- go test -v -timeout=30m ./validatecluster_test.go -ci -local
- cd ../secretsencryption
- vagrant destroy -f
- go test -v -timeout=30m ./secretsencryption_test.go -ci -local
- cd ../upgradecluster
- E2E_RELEASE_CHANNEL="latest" go test -v -timeout=45m ./upgradecluster_test.go -ci -local
- docker stop registry && docker rm registry
volumes:
- name: libvirt
path: /var/run/libvirt/
- name: docker
path: /var/run/docker.sock
- name: cache
path: /tmp/artifacts
volumes:
- name: docker
host:
path: /var/run/docker.sock
- name: libvirt
host:
path: /var/run/libvirt/
- name: cache
temp: {}
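
A rough local equivalent of the e2e pipeline's two steps, for anyone reproducing it outside Drone (a sketch only: it assumes a k3s checkout with a working /var/run/libvirt socket on the host; the image tag and mounts mirror the steps above, while the /k3s mount path is illustrative):

# Build stage: create the test image and the k3s artifacts
DOCKER_BUILDKIT=1 docker build --target test-e2e -t test-e2e -f Dockerfile.test .
SKIP_VALIDATE=true SKIP_AIRGAP=true dapper ci

# Test stage: run one suite inside the image, passing libvirt and docker through
docker run --rm --privileged --network host \
  -v /var/run/libvirt/:/var/run/libvirt/ \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v "$PWD":/k3s -w /k3s/tests/e2e/validatecluster \
  -e E2E_REGISTRY=true \
  --entrypoint sh test-e2e \
  -c 'vagrant destroy -f && go test -v -timeout=30m ./validatecluster_test.go -ci -local'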

View File

@@ -34,3 +34,21 @@ ENV TEST_CLEANUP true
ENTRYPOINT ["./scripts/entry.sh"]
CMD ["test"]
FROM vagrantlibvirt/vagrant-libvirt:0.10.7 AS test-e2e
RUN apt-get update && apt-get install -y docker.io
RUN vagrant plugin install vagrant-k3s vagrant-reload vagrant-scp
RUN vagrant box add generic/ubuntu2004 --provider libvirt --force
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
chmod +x ./kubectl; \
mv ./kubectl /usr/local/bin/kubectl
ENV GO_VERSION 1.19.2
RUN curl -O -L "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz"; \
rm -rf /usr/local/go; \
tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz;
ENV PATH="${PATH}:/usr/local/go/bin"
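
A quick sanity check of the new stage (a sketch; the build command matches the Drone step, the rest is illustrative):

DOCKER_BUILDKIT=1 docker build --target test-e2e -t test-e2e -f Dockerfile.test .
# Confirm the vagrant plugins, box, Go toolchain, and kubectl all landed in the image
docker run --rm --entrypoint bash test-e2e -c \
  'vagrant plugin list && vagrant box list && go version && kubectl version --client'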

View File

@@ -43,26 +43,47 @@ echo "Did test-run-lazypull $?"
[ "$ARCH" != 'amd64' ] && \
early-exit "Skipping remaining tests, images not available for $ARCH."
E2E_OUTPUT=$artifacts test-run-sonobuoy serial
echo "Did test-run-sonobuoy serial $?"
# ---
if [ "$DRONE_BUILD_EVENT" = 'tag' ]; then
E2E_OUTPUT=$artifacts test-run-sonobuoy serial
echo "Did test-run-sonobuoy serial $?"
E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
echo "Did test-run-sonobuoy parallel $?"
early-exit 'Skipping remaining tests on tag.'
fi
# ---
test-run-sonobuoy etcd serial
echo "Did test-run-sonobuoy-etcd serial $?"
test-run-sonobuoy mysql serial
echo "Did test-run-sonobuoy-mysqk serial $?"
test-run-sonobuoy postgres serial
echo "Did test-run-sonobuoy-postgres serial $?"
if [ "$DRONE_BUILD_EVENT" = 'cron' ]; then
E2E_OUTPUT=$artifacts test-run-sonobuoy serial
echo "Did test-run-sonobuoy serial $?"
test-run-sonobuoy etcd serial
echo "Did test-run-sonobuoy-etcd serial $?"
test-run-sonobuoy mysql serial
echo "Did test-run-sonobuoy-mysqk serial $?"
test-run-sonobuoy postgres serial
echo "Did test-run-sonobuoy-postgres serial $?"
# Wait until all serial tests have finished
# Wait until all serial tests have finished
delay=15
(
set +x
while [ $(count-running-tests) -ge 1 ]; do
sleep $delay
done
)
E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
echo "Did test-run-sonobuoy parallel $?"
test-run-sonobuoy etcd parallel
echo "Did test-run-sonobuoy-etcd parallel $?"
test-run-sonobuoy mysql parallel
echo "Did test-run-sonobuoy-mysql parallel $?"
test-run-sonobuoy postgres parallel
echo "Did test-run-sonobuoy-postgres parallel $?"
fi
# Wait until all tests have finished
delay=15
(
set +x
@@ -70,16 +91,5 @@ while [ $(count-running-tests) -ge 1 ]; do
sleep $delay
done
)
E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
echo "Did test-run-sonobuoy parallel $?"
test-run-sonobuoy etcd parallel
echo "Did test-run-sonobuoy-etcd parallel $?"
test-run-sonobuoy mysql parallel
echo "Did test-run-sonobuoy-mysql parallel $?"
test-run-sonobuoy postgres parallel
echo "Did test-run-sonobuoy-postgres parallel $?"
exit 0
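
Net effect of the reshuffle, as the hunks read: tag builds run only the core serial and parallel sonobuoy passes before exiting early, the nightly cron runs the serial pass and then the parallel pass against every datastore, and ordinary push/PR builds skip the sonobuoy matrix altogether. Either branch can be exercised by faking the Drone event (a sketch; the script path is assumed, and CI normally sets DRONE_BUILD_EVENT itself):

DRONE_BUILD_EVENT=tag  ./scripts/test    # core serial + parallel, then early-exit
DRONE_BUILD_EVENT=cron ./scripts/test    # full serial + parallel matrix (etcd, mysql, postgres)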

View File

@@ -29,9 +29,6 @@ E2E_REGISTRY=true E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v validate
echo 'RUNNING SECRETS ENCRYPTION TEST'
/usr/local/go/bin/go test -v secretsencryption/secretsencryption_test.go -nodeOS="$nodeOS" -serverCount=$((servercount)) -timeout=1h -json -ci | tee -a k3s_"$OS".log
echo 'RUN CLUSTER RESET TEST'
/usr/local/go/bin/go test -v clusterreset/clusterreset_test.go -nodeOS="$nodeOS" -serverCount=3 -agentCount=1 -timeout=30m -json -ci | tee -a createreport/k3s_"$OS".log
echo 'RUNNING SPLIT SERVER VALIDATION TEST'
E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v splitserver/splitserver_test.go -nodeOS="$nodeOS" -timeout=30m -json -ci | tee -a k3s_"$OS".log

View File

@@ -17,6 +17,7 @@ var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
@@ -39,7 +40,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
Context("Secrets Keys are rotated:", func() {
It("Starts up with no issues", func() {
var err error
serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
if *local {
serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0)
} else {
serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
@@ -108,7 +113,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
})
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
})
It("Checks node and pod status", func() {
@@ -167,7 +172,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
})
It("Restarts K3s servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
})
It("Verifies encryption rotate stage", func() {
@@ -202,7 +207,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() {
})
It("Restarts K3s Servers", func() {
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed())
Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil))
})
It("Verifies Encryption Reencrypt Stage", func() {

View File

@@ -6,6 +6,7 @@ import (
"os"
"strings"
"testing"
"time"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
@@ -44,7 +45,7 @@ var (
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
Context("Cluster :", func() {
Context("Cluster creates snapshots and workloads:", func() {
It("Starts up with no issues", func() {
var err error
if *local {
@@ -123,6 +124,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
}, "240s", "5s").Should(Succeed())
})
})
Context("Cluster is reset normally", func() {
It("Resets the cluster", func() {
for _, nodeName := range serverNodeNames {
cmd := "sudo systemctl stop k3s"
@@ -169,6 +172,7 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
for _, nodeName := range serverNodeNames[1:] {
cmd := "sudo systemctl start k3s"
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
time.Sleep(20 * time.Second) // Stagger the restarts for etcd learners
}
})
@@ -177,7 +181,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
nodeJournal, _ := e2e.GetJournalLogs(node.Name)
g.Expect(node.Status).Should(Equal("Ready"), nodeJournal)
}
}, "420s", "5s").Should(Succeed())
@@ -204,6 +209,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() {
Expect(res).Should(ContainSubstring("test-nodeport"))
})
})
Context("Cluster restores from snapshot", func() {
It("Restores the snapshot", func() {
//Stop k3s on all nodes
for _, nodeName := range serverNodeNames {

View File

@@ -25,6 +25,10 @@ type Node struct {
ExternalIP string
}
func (n Node) String() string {
return fmt.Sprintf("Node (name: %s, status: %s, roles: %s)", n.Name, n.Status, n.Roles)
}
type Pod struct {
NameSpace string
Name string
@@ -136,6 +140,19 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri
return serverNodeNames, agentNodeNames, nil
}
func scpK3sBinary(nodeNames []string) error {
for _, node := range nodeNames {
cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
if _, err := RunCommand(cmd); err != nil {
return fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
}
if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil {
return err
}
}
return nil
}
// CreateLocalCluster creates a cluster using the locally built k3s binary. The vagrant-scp plugin must be installed for
// this function to work. The binary is deployed as an airgapped install of k3s on the VMs.
// This is intended only for local testing purposes when writing a new E2E test.
@@ -173,14 +190,8 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [
if err := errg.Wait(); err != nil {
return nil, nil, err
}
for _, node := range append(serverNodeNames, agentNodeNames...) {
cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node)
if _, err := RunCommand(cmd); err != nil {
return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err)
}
if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil {
return nil, nil, err
}
if err := scpK3sBinary(append(serverNodeNames, agentNodeNames...)); err != nil {
return nil, nil, err
}
// Install K3s on all nodes in parallel
@@ -302,13 +313,18 @@ func GenReport(specReport ginkgo.SpecReport) {
fmt.Printf("%s", status)
}
func GetJournalLogs(node string) (string, error) {
cmd := "journalctl -u k3s* --no-pager"
return RunCmdOnNode(cmd, node)
}
// GetVagrantLog returns the logs of the vagrant commands that initialize the nodes and provision K3s on each node.
// It also attempts to fetch the systemctl logs of K3s on nodes where the k3s.service failed.
func GetVagrantLog(cErr error) string {
var nodeErr *NodeError
nodeJournal := ""
if errors.As(cErr, &nodeErr) {
nodeJournal, _ = RunCmdOnNode("sudo journalctl -u k3s* --no-pager", nodeErr.Node)
nodeJournal, _ = GetJournalLogs(nodeErr.Node)
nodeJournal = "\nNode Journal Logs:\n" + nodeJournal
}
@@ -331,7 +347,7 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) {
res, err := RunCommand(cmd)
if err != nil {
return nil, err
return nil, fmt.Errorf("unable to get nodes: %s: %v", res, err)
}
nodeList = strings.TrimSpace(res)
split := strings.Split(nodeList, "\n")
@@ -391,7 +407,7 @@ func ParsePods(kubeConfig string, print bool) ([]Pod, error) {
// RestartCluster restarts the k3s service on each node given
func RestartCluster(nodeNames []string) error {
for _, nodeName := range nodeNames {
cmd := "sudo systemctl restart k3s"
cmd := "sudo systemctl restart k3s*"
if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
return err
}
@@ -416,19 +432,18 @@ func RunCommand(cmd string) (string, error) {
return string(out), err
}
func UpgradeCluster(serverNodeNames []string, agentNodeNames []string) error {
for _, nodeName := range serverNodeNames {
cmd := "E2E_RELEASE_CHANNEL=commit vagrant provision " + nodeName
fmt.Println(cmd)
if out, err := RunCommand(cmd); err != nil {
fmt.Println("Error Upgrading Cluster", out)
func UpgradeCluster(nodeNames []string, local bool) error {
upgradeVersion := "E2E_RELEASE_CHANNEL=commit"
if local {
if err := scpK3sBinary(nodeNames); err != nil {
return err
}
upgradeVersion = "E2E_RELEASE_VERSION=skip"
}
for _, nodeName := range agentNodeNames {
cmd := "E2E_RELEASE_CHANNEL=commit vagrant provision " + nodeName
if _, err := RunCommand(cmd); err != nil {
fmt.Println("Error Upgrading Cluster", err)
for _, nodeName := range nodeNames {
cmd := upgradeVersion + " vagrant provision " + nodeName
if out, err := RunCommand(cmd); err != nil {
fmt.Println("Error Upgrading Cluster", out)
return err
}
}
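
Per node, the refactored helper now composes a single provision command from the mode; the two paths reduce to the following (a sketch; "server-0" is an illustrative node name):

# local=false: re-provision against the latest commit on master
E2E_RELEASE_CHANNEL=commit vagrant provision server-0

# local=true: scpK3sBinary has already placed the binary at /usr/local/bin/k3s,
# so provisioning only needs to skip the download
E2E_RELEASE_VERSION=skip vagrant provision server-0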

View File

@@ -25,8 +25,10 @@ def provision(vm, role, role_num, node_num)
load vagrant_defaults
defaultOSConfigure(vm)
if !RELEASE_VERSION.empty?
if RELEASE_VERSION == "skip"
install_type = "INSTALL_K3S_SKIP_DOWNLOAD=true"
elsif !RELEASE_VERSION.empty?
install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}"
elsif RELEASE_CHANNEL == "commit"
vm.provision "shell", path: "../scripts/latest_commit.sh", args: ["master", "/tmp/k3s_commits"]

View File

@@ -20,10 +20,11 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 2, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "Controls which version k3s upgrades to: local binary or latest commit on master")
// Environment Variables Info:
// E2E_REGISTRY: true/false (default: false)
// Controls which K3s version is installed first, upgrade is always to latest commit
// Controls which K3s version is installed first
// E2E_RELEASE_VERSION=v1.23.3+k3s1
// OR
// E2E_RELEASE_CHANNEL=(commit|latest|stable), commit pulls latest commit from master
@@ -249,9 +250,8 @@ var _ = Describe("Verify Upgrade", Ordered, func() {
It("Upgrades with no issues", func() {
var err error
err = e2e.UpgradeCluster(serverNodeNames, agentNodeNames)
fmt.Println(err)
Expect(err).NotTo(HaveOccurred())
Expect(e2e.UpgradeCluster(append(serverNodeNames, agentNodeNames...), *local)).To(Succeed())
Expect(e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed())
fmt.Println("CLUSTER UPGRADED")
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())