Creation of K3s integration test Sonobuoy plugin (#3931)

* Added test runner and build files
* Changed integration tests to output JUnit results.
* Updated documentation, removed comments

Signed-off-by: dereknola <derek.nola@suse.com>
Derek Nola 2021-08-30 08:27:59 -07:00 committed by GitHub
parent 84155ee313
commit 60297a1bbe
7 changed files with 149 additions and 13 deletions


@@ -7,6 +7,7 @@ import (
 	"time"
 	. "github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/reporters"
 	. "github.com/onsi/gomega"
 	testutil "github.com/rancher/k3s/tests/util"
 )
@@ -117,5 +118,7 @@ var _ = AfterSuite(func() {
 func Test_IntegrationEtcd(t *testing.T) {
 	RegisterFailHandler(Fail)
-	RunSpecs(t, "Etcd Suite")
+	RunSpecsWithDefaultAndCustomReporters(t, "Etcd Suite", []Reporter{
+		reporters.NewJUnitReporter("/tmp/results/junit-etcd.xml"),
+	})
 }


@@ -4,6 +4,7 @@ import (
 	"testing"
 	. "github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/reporters"
 	. "github.com/onsi/gomega"
 	"github.com/rancher/k3s/pkg/flock"
 )
@@ -30,5 +31,7 @@ var _ = Describe("file locks", func() {
 func TestFlock(t *testing.T) {
 	RegisterFailHandler(Fail)
-	RunSpecs(t, "Flock Suite")
+	RunSpecsWithDefaultAndCustomReporters(t, "Flock Suite", []Reporter{
+		reporters.NewJUnitReporter("/tmp/results/junit-flock.xml"),
+	})
 }

scripts/build-tests-sonobuoy Executable file

@@ -0,0 +1,66 @@
#!/bin/bash
set -e
cd $(dirname $0)/..
REPO="k3s-int-tests"
OUTFILE="./dist/artifacts/k3s-int-tests.yaml"
# Compile all integration tests and containerize them
mkdir -p dist/artifacts
go test -c -v -ldflags "-X 'github.com/rancher/k3s/tests/util.existingServer=True'" -o dist/artifacts/k3s-integration-1.test ./tests/integration/... -run Integration
PKG_TO_TEST=$(find ./pkg/ -type f -name "*_int_test.go" | sed -r 's|/[^/]+$||' |sort -u)
INDEX=1
for i in $PKG_TO_TEST; do
    echo $i
    go test -c -v -ldflags "-X 'github.com/rancher/k3s/tests/util.existingServer=True'" -o dist/artifacts/k3s-integration-$INDEX.test $i -run Integration
    INDEX=$(expr $INDEX + 1)
done
go test -c -v -ldflags "-X 'github.com/rancher/k3s/tests/util.existingServer=True'" -o dist/artifacts/k3s-integration-$INDEX.test ./tests/integration/... -run Integration
docker build -f ./tests/integration/Dockerfile.test -t $REPO .
docker save $REPO -o ./dist/artifacts/$REPO.tar
sudo mkdir -p /var/lib/rancher/k3s/agent/images
sudo mv ./dist/artifacts/$REPO.tar /var/lib/rancher/k3s/agent/images/
# If k3s is already running, attempt to import the image
if [[ "$(pgrep k3s | wc -l)" -gt 0 ]]; then
sudo ./dist/artifacts/k3s ctr images import /var/lib/rancher/k3s/agent/images/$REPO.tar
fi
# Cleanup compiled tests
rm dist/artifacts/k3s-integration-*
# Generate the sonobuoy plugin and inject the necessary
# podSpec and volume mount modifications
PODSPEC=\
' hostNetwork: true
volumes:
- name: var-k3s
hostPath:
path: /var/lib/rancher/k3s/
type: Directory
- name: etc-k3s
hostPath:
path: /etc/rancher/k3s/
type: Directory'
VOLMOUNTS=\
' - mountPath: /var/lib/rancher/k3s/
name: var-k3s
- mountPath: /etc/rancher/k3s/
name: etc-k3s'
sonobuoy gen plugin \
--format=junit \
--image ${REPO} \
--show-default-podspec \
--name k3s-int \
--type job \
--cmd ./test-runner.sh \
--env KUBECONFIG=/etc/rancher/k3s/k3s.yaml \
> $OUTFILE
awk -v PS="$PODSPEC" '/podSpec:/{print;print PS;next}1' $OUTFILE > ./dist/artifacts/temp.yaml
mv ./dist/artifacts/temp.yaml $OUTFILE
awk -v VM="$VOLMOUNTS" '/volumeMounts:/{print;print VM;next}1' $OUTFILE > ./dist/artifacts/temp.yaml
mv ./dist/artifacts/temp.yaml $OUTFILE
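
For context, a minimal usage sketch of the script above (not part of the commit). It assumes `./dist/artifacts/k3s` already exists, since the Dockerfile below copies it into the image, and that the host allows `sudo` for staging the image tarball:
```bash
# Build the containerized tests and the Sonobuoy plugin definition,
# then inspect the two artifacts the script leaves behind.
./scripts/build-tests-sonobuoy
ls -l /var/lib/rancher/k3s/agent/images/k3s-int-tests.tar   # image staged for k3s to import
cat ./dist/artifacts/k3s-int-tests.yaml                      # generated plugin with injected podSpec/volumeMounts
```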


@@ -76,11 +76,23 @@ Integration tests can be run with no k3s cluster present, each test will spin up
 go test ./pkg/... ./tests/... -run Integration
 ```
-Integration tests can also be run on an existing single-node cluster via compile time flag, tests will skip if the server is not configured correctly.
-```
+Integration tests can be run on an existing single-node cluster via compile time flag, tests will skip if the server is not configured correctly.
+```bash
 go test -ldflags "-X 'github.com/rancher/k3s/tests/util.existingServer=True'" ./pkg/... ./tests/... -run Integration
 ```
+Integration tests can also be run via a [Sonobuoy](https://sonobuoy.io/docs/v0.53.2/) plugin on an existing single-node cluster.
+```bash
+./scripts/build-tests-sonobuoy
+sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml sonobuoy run --plugin ./dist/artifacts/k3s-int-tests.yaml
+```
+Check the sonobuoy status and retrieve results
+```
+sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml sonobuoy status
+sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml sonobuoy retrieve
+sudo KUBECONFIG=/etc/rancher/k3s/k3s.yaml sonobuoy results <TAR_FILE_FROM_RETRIEVE>
+```
 ___
 ## End-to-End (E2E) Tests
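
As a follow-up to the documented commands, a hedged example of inspecting the JUnit reports after `sonobuoy retrieve`; the directory layout inside the tarball is Sonobuoy's own and may differ between versions:
```bash
# Unpack the retrieved tarball and locate the JUnit files the plugin wrote
# to /tmp/results (junit-etcd.xml, junit-flock.xml, junit-ls.xml, ...).
mkdir -p ./sonobuoy-output
tar xzf <TAR_FILE_FROM_RETRIEVE> -C ./sonobuoy-output
find ./sonobuoy-output -name 'junit-*.xml'
```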

tests/integration/Dockerfile.test New file

@@ -0,0 +1,16 @@
FROM golang:buster
RUN apt update && \
apt install -y curl git lsof bash openssh-server gcc g++ make ca-certificates && \
curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
WORKDIR $GOPATH/src/github.com/rancher/k3s-io/k3s/
COPY ./tests/testdata ./testdata
COPY ./tests/integration/test-runner.sh .
COPY ./dist/artifacts/k3s /usr/local/bin
COPY ./dist/artifacts/k3s-integration-* ./tests/
RUN go get -u github.com/onsi/gomega
RUN go get -u github.com/onsi/ginkgo


@@ -8,6 +8,7 @@ import (
 	"testing"
 	. "github.com/onsi/ginkgo"
+	"github.com/onsi/ginkgo/reporters"
 	. "github.com/onsi/gomega"
 	testutil "github.com/rancher/k3s/tests/util"
 )
@@ -35,8 +36,9 @@ var _ = Describe("local storage", func() {
 		}, "90s", "1s").Should(MatchRegexp("kube-system.+coredns.+1\\/1.+Running"))
 	})
 	It("creates a new pvc", func() {
-		Expect(testutil.K3sCmd("kubectl", "create", "-f", "../testdata/localstorage_pvc.yaml")).
-			To(ContainSubstring("persistentvolumeclaim/local-path-pvc created"))
+		result, err := testutil.K3sCmd("kubectl", "create", "-f", "../testdata/localstorage_pvc.yaml")
+		Expect(result).To(ContainSubstring("persistentvolumeclaim/local-path-pvc created"))
+		Expect(err).NotTo(HaveOccurred())
 	})
 	It("creates a new pod", func() {
 		Expect(testutil.K3sCmd("kubectl", "create", "-f", "../testdata/localstorage_pod.yaml")).
@@ -44,11 +46,14 @@ var _ = Describe("local storage", func() {
 	})
 	It("shows storage up in kubectl", func() {
 		Eventually(func() (string, error) {
-			return testutil.K3sCmd("kubectl", "get", "pvc")
+			return testutil.K3sCmd("kubectl", "get", "--namespace=default", "pvc")
 		}, "45s", "1s").Should(MatchRegexp(`local-path-pvc.+Bound`))
 		Eventually(func() (string, error) {
-			return testutil.K3sCmd("kubectl", "get", "pv")
+			return testutil.K3sCmd("kubectl", "get", "--namespace=default", "pv")
 		}, "10s", "1s").Should(MatchRegexp(`pvc.+2Gi.+Bound`))
+		Eventually(func() (string, error) {
+			return testutil.K3sCmd("kubectl", "get", "--namespace=default", "pod")
+		}, "10s", "1s").Should(MatchRegexp(`volume-test.+Running`))
 	})
 	It("has proper folder permissions", func() {
 		var k3sStorage = "/var/lib/rancher/k3s/storage"
@@ -56,7 +61,7 @@ var _ = Describe("local storage", func() {
 		Expect(err).ToNot(HaveOccurred())
 		Expect(fmt.Sprintf("%04o", fileStat.Mode().Perm())).To(Equal("0701"))
-		pvResult, err := testutil.K3sCmd("kubectl", "get", "pv")
+		pvResult, err := testutil.K3sCmd("kubectl", "get", "--namespace=default", "pv")
 		Expect(err).ToNot(HaveOccurred())
 		reg, err := regexp.Compile(`pvc[^\s]+`)
 		Expect(err).ToNot(HaveOccurred())
@@ -66,9 +71,9 @@ var _ = Describe("local storage", func() {
 		Expect(fmt.Sprintf("%04o", fileStat.Mode().Perm())).To(Equal("0777"))
 	})
 	It("deletes properly", func() {
-		Expect(testutil.K3sCmd("kubectl", "delete", "pod", "volume-test")).
-			To(ContainSubstring("pod \"volume-test\" deleted"))
-		Expect(testutil.K3sCmd("kubectl", "delete", "pvc", "local-path-pvc")).
+		Expect(testutil.K3sCmd("kubectl", "delete", "--namespace=default", "--force", "pod", "volume-test")).
+			To(ContainSubstring("pod \"volume-test\" force deleted"))
+		Expect(testutil.K3sCmd("kubectl", "delete", "--namespace=default", "pvc", "local-path-pvc")).
 			To(ContainSubstring("persistentvolumeclaim \"local-path-pvc\" deleted"))
 	})
 })
@@ -82,5 +87,7 @@ var _ = AfterSuite(func() {
 func Test_IntegrationLocalStorage(t *testing.T) {
 	RegisterFailHandler(Fail)
-	RunSpecs(t, "Local Storage Suite")
+	RunSpecsWithDefaultAndCustomReporters(t, "Local Storage Suite", []Reporter{
+		reporters.NewJUnitReporter("/tmp/results/junit-ls.xml"),
+	})
 }

tests/integration/test-runner.sh New file

@@ -0,0 +1,29 @@
#!/bin/sh
set -x
results_dir="${RESULTS_DIR:-/tmp/results}"
# saveResults prepares the results for handoff to the Sonobuoy worker.
# See: https://github.com/vmware-tanzu/sonobuoy/blob/master/site/content/docs/master/plugins.md
saveResults() {
    cd ${results_dir}
    # Sonobuoy worker expects a tar file.
    tar czf results.tar.gz *
    # Signal to the worker that we are done and where to find the results.
    printf ${results_dir}/results.tar.gz > ${results_dir}/done
}
# Ensure that we tell the Sonobuoy worker we are done regardless of results.
trap saveResults EXIT
runTests() {
    cd ./tests
    for t in *.test; do
        # Run each test (automatically saves the output in the results directory).
        ./$t
    done
}
runTests
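
For orientation, a sketch of what the results directory handed to the Sonobuoy worker contains after a run; the JUnit file names come from the reporter paths configured in the test suites above, and the exact set depends on which test binaries were packaged:
```bash
ls ${RESULTS_DIR:-/tmp/results}
# junit-etcd.xml  junit-flock.xml  junit-ls.xml  ...  results.tar.gz  done
cat ${RESULTS_DIR:-/tmp/results}/done
# /tmp/results/results.tar.gz
```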