Convert snapshotter test to e2e test

Signed-off-by: Derek Nola <derek.nola@suse.com>
Derek Nola 2023-10-11 11:26:36 -07:00
parent d022a506d5
commit da7312d082
7 changed files with 208 additions and 217 deletions


@@ -35,7 +35,7 @@ jobs:
fail-fast: false
matrix:
# TODO fix embeddedmirror and add it to the matrix
-      etest: [startup, s3, externalip, privateregistry]
+      etest: [startup, s3, btrfs, externalip, privateregistry]
max-parallel: 3
steps:
- name: "Checkout"

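With `btrfs` in the `etest` matrix, the new suite runs through the same E2E harness as `startup`, `s3`, and the rest. A minimal sketch of the equivalent local invocation, assuming the `go test` entry point the other e2e suites use (the timeout value is illustrative):

```bash
# Sketch: run the new btrfs e2e suite locally, roughly as the matrix job would.
# The suite provisions a full VM, so it needs far more than go test's
# default 10-minute timeout.
cd tests/e2e/btrfs
go test -v -timeout=45m ./...
```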
.github/workflows/snapshotter.yaml (deleted)

@@ -1,82 +0,0 @@
name: Snapshotter
on:
push:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/**"
- "!tests/snapshotter/**"
- ".github/**"
- "!.github/workflows/snapshotter.yaml"
pull_request:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/**"
- "!tests/snapshotter/**"
- ".github/**"
- "!.github/workflows/snapshotter.yaml"
workflow_dispatch: {}
permissions:
contents: read
jobs:
build:
uses: ./.github/workflows/build-k3s.yaml
test:
name: "Smoke Test"
needs: build
runs-on: ubuntu-latest
timeout-minutes: 40
strategy:
fail-fast: false
matrix:
vm: [opensuse-leap]
snapshotter: [btrfs]
max-parallel: 1
defaults:
run:
working-directory: tests/snapshotter/${{ matrix.snapshotter }}/${{ matrix.vm }}
env:
VAGRANT_EXPERIMENTAL: disks
steps:
- name: "Checkout"
uses: actions/checkout@v4
with: { fetch-depth: 1 }
- name: "Download Binary"
uses: actions/download-artifact@v3
with: { name: k3s, path: dist/artifacts/ }
- name: Set up vagrant and libvirt
uses: ./.github/actions/vagrant-setup
# Workaround for https://github.com/actions/cache/issues/1319
- name: Move vagrant cache to /root
run: |
mkdir -p /tmp/boxes
sudo rm -rf /root/.vagrant.d/boxes
sudo mv -f /tmp/boxes /root/.vagrant.d
- name: "Vagrant Cache"
uses: actions/cache@v4
with:
path: |
/tmp/boxes
/tmp/gems
key: vagrant-box-${{ matrix.vm }}
- name: "Vagrant Plugin(s)"
run: sudo vagrant plugin install vagrant-k3s
- name: "Vagrant Up ⏩ Install K3s"
run: sudo vagrant up
- name: "⏳ Node"
run: sudo vagrant provision --provision-with=k3s-wait-for-node
- name: "⏳ CoreDNS"
run: sudo vagrant provision --provision-with=k3s-wait-for-coredns
- name: "k3s-status" # kubectl get node,all -A -o wide
run: sudo vagrant provision --provision-with=k3s-status
- name: "k3s-snapshots" # if no snapshots then we fail
run: sudo vagrant provision --provision-with=k3s-snapshots
- name: Copy out vagrant boxes for cache
run: |
sudo mv -f /root/.vagrant.d/boxes /tmp/boxes
sudo chmod -R 777 /tmp/boxes

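For reference, the deleted workflow exercised the old smoke test through explicitly named provisioners. A sketch of the manual equivalent, assembled from the steps above (run from `tests/snapshotter/btrfs/opensuse-leap`):

```bash
# Manual equivalent of the deleted workflow's test steps.
sudo vagrant plugin install vagrant-k3s
sudo VAGRANT_EXPERIMENTAL=disks vagrant up
sudo vagrant provision --provision-with=k3s-wait-for-node
sudo vagrant provision --provision-with=k3s-wait-for-coredns
sudo vagrant provision --provision-with=k3s-status     # kubectl get node,all -A -o wide
sudo vagrant provision --provision-with=k3s-snapshots  # fails if no snapshots exist
```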

@@ -66,9 +66,9 @@ Docker tests run clusters of K3s nodes as containers and test basic functionality
___
-## Smoke Tests
+## Install Tests
-Smoke tests are a collection of tests defined under the [tests](./tests) and fall into two categories: install and snapshotter. These tests are used to validate the installation and operation of K3s on a variety of operating systems. The sub-directories therein contain fixtures for running simple clusters to assert correct behavior for "happy path" scenarios. The tests themselves are Vagrantfiles describing single-node installations that are easily spun up with Vagrant for the `libvirt` and `virtualbox` providers:
+Install tests are a collection of tests defined under the [tests/install](./tests/install) directory. These tests are used to validate the installation and operation of K3s on a variety of operating systems. The tests themselves are Vagrantfiles describing single-node installations that are easily spun up with Vagrant for the `libvirt` and `virtualbox` providers:
- [Install Script](install) :arrow_right: scheduled nightly and on an install script change
- [CentOS 7](install/centos-7) (stand-in for RHEL 7)
@@ -77,16 +77,11 @@ Smoke tests are a collection of tests defined under the [tests](./tests) and fall
- [Fedora 37](install/fedora)
- [Leap 15.5](install/opensuse-leap) (stand-in for SLES)
- [Ubuntu 22.04](install/ubuntu-2204)
-- [Snapshotter](snapshotter/btrfs/opensuse-leap) :arrow_right: on any code change
-  - [BTRFS](snapshotter/btrfs) ([containerd built-in](https://github.com/containerd/containerd/tree/main/snapshots/btrfs))
-    - [Leap 15.4](../tests/snapshotter/btrfs/opensuse-leap)
## Format
When adding new installer test(s) please copy the prevalent style for the `Vagrantfile`.
Ideally, the boxes used for additional assertions will support the default `libvirt` provider which
-enables them to be used by our Github Actions Workflow(s). See:
-- [install.yaml](../.github/workflows/install.yaml).
-- [snapshotter.yaml](../.github/workflows/snapshotter.yaml).
+enables them to be used by our GitHub Actions [Install Test Workflow](../.github/workflows/install.yaml).
### Framework
@@ -132,11 +127,6 @@ vagrant provision --provision-with=k3s-status
vagrant provision --provision-with=k3s-procps
```
-The **Snapshotter** test requires that k3s binary is built at `dist/artifacts/k3s`.
-It is invoked similarly, i.e. `vagrant up`, but with different sets of named shell provisioners.
-Take a look at the individual Vagrantfiles and/or the Github Actions workflows that harness them to get
-an idea of how they can be invoked.
___
## Performance Tests

tests/e2e/btrfs/Vagrantfile (new file)

@@ -0,0 +1,102 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
ENV['VAGRANT_EXPERIMENTAL']="disks"
ENV['VAGRANT_LOG']="error"
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['opensuse/Leap-15.5.x86_64'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
# VirtualBox >= 6.1.28 requires `/etc/vbox/network.conf` for expanded private networks
NETWORK_PREFIX = "10.10.10"
install_type = ""
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = role
# An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32
vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}", netmask: "255.255.255.0"
vagrant_defaults = '../vagrantdefaults.rb'
load vagrant_defaults if File.exists?(vagrant_defaults)
defaultOSConfigure(vm)
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision 'setup-btrfs', type: 'shell', run: 'once', privileged: true do |sh|
sh.inline = <<~EOF
#!/usr/bin/env bash
set -eu -o pipefail
zypper install -y apparmor-parser btrfsprogs hostname
mkdir -p /var/lib/rancher/k3s /etc/rancher/k3s /usr/local/bin
if ! mountpoint -q /var/lib/rancher/k3s; then
: ${BTRFS_DEV:=#{ENV['BTRFS_DEV']}}
for disk in sd[b-d] vd[b-d] xd[b-d]; do
if [ -n "${BTRFS_DEV}" ]; then break; fi
: ${BTRFS_DEV:=$(test -b /dev/$disk && echo $disk)}
done
btrfs filesystem show /dev/${BTRFS_DEV:?unable to determine automatically, please specify} 2>/dev/null || mkfs -t btrfs /dev/${BTRFS_DEV}
mountpoint -q /mnt || mount -t btrfs /dev/${BTRFS_DEV} /mnt
btrfs subvolume show /mnt/@k3s 2>/dev/null || btrfs subvolume create /mnt/@k3s
umount /mnt
mount -t btrfs -o subvol=@k3s /dev/${BTRFS_DEV} /var/lib/rancher/k3s
fi
EOF
end
if role.include?("server") && role_num == 0
vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = ["server"]
k3s.env = %W[#{install_type}]
k3s.config = <<~YAML
cluster-init: true
node-external-ip: "#{NETWORK_PREFIX}.100"
flannel-iface: eth1
snapshotter: btrfs
token: vagrant
disable:
- local-storage
- metrics-server
- servicelb
- traefik
write-kubeconfig-mode: '0644'
YAML
end
end
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"]
# Default provider is libvirt, virtualbox is only provided as a backup
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
v.storage :file, :size => '8G' # Requires VAGRANT_EXPERIMENTAL="disks"
end
config.vm.provider "virtualbox" do |v,o|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
v.gui = false
v.check_guest_additions = false
o.vm.disk :disk, name: "btrfs", size: "8GB" # Requires VAGRANT_EXPERIMENTAL="disks"
end
if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end
# Must iterate on the index, vagrant does not understand iterating
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
provision(node.vm, name, role_num, i)
end
end
end

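The `setup-btrfs` provisioner above formats a spare disk (auto-detected from `sd[b-d]`/`vd[b-d]`/`xd[b-d]`, or taken from `BTRFS_DEV`), creates an `@k3s` subvolume, and mounts it at `/var/lib/rancher/k3s`, so the btrfs snapshotter gets a btrfs-backed data dir. A quick host-side check (a sketch; `server-0` is the default role defined above):

```bash
# Sketch: confirm the K3s data dir is btrfs-backed after `vagrant up`.
vagrant ssh server-0 -c 'findmnt -t btrfs /var/lib/rancher/k3s'
vagrant ssh server-0 -c 'sudo btrfs subvolume list /var/lib/rancher/k3s'
```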

@@ -0,0 +1,99 @@
package btrfs
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
// Environment Variables Info:
// E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master
func Test_E2EBtrfsSnapshot(t *testing.T) {
RegisterFailHandler(Fail)
flag.Parse()
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "Btrfs Snapshot Test Suite", suiteConfig, reporterConfig)
}
var (
kubeConfigFile string
serverNodeNames []string
)
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify that btrfs based servers work", Ordered, func() {
Context("Btrfs Snapshots are taken", func() {
It("Starts up with no issues", func() {
var err error
// OS and server are hardcoded because only openSUSE Leap 15.5 natively supports Btrfs
if *local {
serverNodeNames, _, err = e2e.CreateLocalCluster("opensuse/Leap-15.5.x86_64", 1, 0)
} else {
serverNodeNames, _, err = e2e.CreateCluster("opensuse/Leap-15.5.x86_64", 1, 0)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("Server Nodes:", serverNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks node and pod status", func() {
fmt.Printf("\nFetching node status\n")
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParseNodes(kubeConfigFile, true)
fmt.Printf("\nFetching pods status\n")
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "620s", "5s").Should(Succeed())
_, _ = e2e.ParsePods(kubeConfigFile, true)
})
It("Checks that btrfs snapshots exist", func() {
cmd := "btrfs subvolume list /var/lib/rancher/k3s/agent/containerd/io.containerd.snapshotter.v1.btrfs"
res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
Expect(res).To(ContainSubstring("agent/containerd/io.containerd.snapshotter.v1.btrfs/active/2"))
Expect(res).To(ContainSubstring("agent/containerd/io.containerd.snapshotter.v1.btrfs/snapshots/3"))
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})

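The suite defines its own `-ci` and `-local` flags, so they are passed to `go test` after the package pattern, mirroring how the other e2e suites under `tests/e2e` are invoked. A hedged sketch:

```bash
# Sketch: run against a locally built binary; -local makes the suite call
# e2e.CreateLocalCluster with the artifact from dist/artifacts.
go test -v -timeout=45m ./tests/e2e/btrfs/... -local

# With -ci the AfterSuite above always destroys the cluster; without it,
# a failed run leaves the VM up for debugging.
go test -v -timeout=45m ./tests/e2e/btrfs/... -ci
```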

@@ -8,6 +8,7 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
+	"regexp"
 	"strconv"
 	"strings"
 	"time"
@@ -289,6 +290,8 @@ func GenKubeConfigFile(serverName string) (string, error) {
 	if err != nil {
 		return "", err
 	}
+	re := regexp.MustCompile(`(?m)==> vagrant:.*\n`)
+	kubeConfig = re.ReplaceAllString(kubeConfig, "")
 	nodeIP, err := FetchNodeExternalIP(serverName)
 	if err != nil {
 		return "", err

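The added regexp strips `==> vagrant:` banner lines that Vagrant can interleave with command output, which would otherwise corrupt the captured kubeconfig. The shell analogue of the same cleanup (illustrative):

```bash
# Illustrative shell analogue of the Go regexp above.
vagrant ssh server-0 -c 'cat /etc/rancher/k3s/k3s.yaml' | sed '/^==> vagrant:/d' > kubeconfig.yaml
```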
tests/snapshotter/btrfs/opensuse-leap/Vagrantfile (deleted)

@@ -1,121 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Vagrant box for testing k3s with the btrfs snapshotter. Usage:
# VAGRANT_EXPERIMENTAL=disks vagrant up
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s"]
config.vm.box = "opensuse/Leap-15.5.x86_64"
config.vm.boot_timeout = ENV['TEST_VM_BOOT_TIMEOUT'] || 600 # seconds
config.vm.synced_folder '../../../../dist/artifacts', '/vagrant', type: 'rsync', disabled: false,
rsync__exclude: ENV['RSYNC_EXCLUDE'] || '*.tar.*'
config.vm.define 'snapshotter-btrfs', primary: true do |test|
test.vm.hostname = 'smoke'
test.vm.provision 'k3s-prepare', type: 'shell', run: 'once', privileged: true do |sh|
sh.inline = <<~EOF
#!/usr/bin/env bash
set -eu -o pipefail
zypper install -y apparmor-parser btrfsprogs hostname
mkdir -p /var/lib/rancher/k3s /etc/rancher/k3s /usr/local/bin
if ! mountpoint -q /var/lib/rancher/k3s; then
: ${BTRFS_DEV:=#{ENV['BTRFS_DEV']}}
for disk in sd[b-d] vd[b-d] xd[b-d]; do
if [ -n "${BTRFS_DEV}" ]; then break; fi
: ${BTRFS_DEV:=$(test -b /dev/$disk && echo $disk)}
done
btrfs filesystem show /dev/${BTRFS_DEV:?unable to determine automatically, please specify} 2>/dev/null || mkfs -t btrfs /dev/${BTRFS_DEV}
mountpoint -q /mnt || mount -t btrfs /dev/${BTRFS_DEV} /mnt
btrfs subvolume show /mnt/@k3s 2>/dev/null || btrfs subvolume create /mnt/@k3s
umount /mnt
mount -t btrfs -o subvol=@k3s /dev/${BTRFS_DEV} /var/lib/rancher/k3s
fi
# Install k3s binary
install -m 755 /vagrant/k3s /usr/local/bin
if [ -e /vagrant/*.tar ]; then
mkdir -vp /var/lib/rancher/k3s/agent/images
for tar in /vagrant/*.tar; do
cp -vf $tar /var/lib/rancher/k3s/agent/images/
done
fi
EOF
end
test.vm.provision 'k3s-install', type: 'k3s', run: 'once' do |k3s|
k3s.args = %w[server --snapshotter=btrfs]
k3s.env = %w[INSTALL_K3S_NAME=server INSTALL_K3S_SKIP_DOWNLOAD=true K3S_TOKEN=vagrant]
k3s.config = <<~YAML
disable:
- local-storage
- metrics-server
- servicelb
- traefik
disable-helm-controller: true
disable-network-policy: true
write-kubeconfig-mode: '0644'
YAML
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
end
test.vm.provision "k3s-wait-for-node", type: "shell", run: ENV['CI'] == 'true' ? 'never' : 'once' do |sh|
sh.env = { :PATH => "/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin" }
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eu -o pipefail
echo 'Waiting for node to be ready ...'
time timeout 300 bash -c 'while ! (kubectl wait --for condition=ready node/$(hostnamectl --static) 2>/dev/null); do sleep 5; done'
kubectl get node,all -A -o wide
SHELL
end
test.vm.provision "k3s-wait-for-coredns", type: "shell", run: ENV['CI'] == 'true' ? 'never' : 'once' do |sh|
sh.env = { :PATH => "/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin" }
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eu -o pipefail
function describe-coredns {
RC=$?
if [[ $RC -ne 0 ]]; then
kubectl describe node
kubectl --namespace kube-system describe pod -l k8s-app=kube-dns
kubectl --namespace kube-system logs -l k8s-app=kube-dns
fi
exit $RC
}
trap describe-coredns EXIT
time timeout 300 bash -c 'while ! (kubectl --namespace kube-system rollout status --timeout 10s deploy/coredns 2>/dev/null); do sleep 5; done'
SHELL
end
test.vm.provision "k3s-status", type: "shell", run: ENV['CI'] == 'true' ? 'never' : 'once' do |sh|
sh.env = { :PATH => "/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin" }
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eux -o pipefail
kubectl get node,all -A -o wide
SHELL
end
test.vm.provision "k3s-snapshots", type: "shell", run: ENV['CI'] == 'true' ? 'never' : 'once' do |sh|
sh.env = { :PATH => "/usr/local/bin:/usr/local/sbin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin" }
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eux -o pipefail
btrfs subvolume list /var/lib/rancher/k3s/agent/containerd/io.containerd.snapshotter.v1.btrfs
SHELL
end
end
%w[libvirt virtualbox].each do |p|
config.vm.provider p do |v|
v.cpus = ENV['TEST_VM_CPUS'] || 2
v.memory = ENV['TEST_VM_MEMORY'] || 2048
end
end
config.vm.provider :libvirt do |v,o|
v.storage :file, :size => '8G'
end
config.vm.provider :virtualbox do |v,o|
v.gui = false
v.check_guest_additions = false
o.vm.disk :disk, name: "btrfs", size: "8GB" # Requires VAGRANT_EXPERIMENTAL="disks"
end
end
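
Because the named provisioners above use `run: 'never'` only when `CI=true`, a plain `vagrant up` outside CI executed the whole sequence. The old smoke test therefore reduced to the following, assuming the binary was already built at `dist/artifacts/k3s`:

```bash
# Old smoke-test invocation, end to end.
VAGRANT_EXPERIMENTAL=disks vagrant up
# The snapshot assertion boiled down to:
vagrant ssh -c 'sudo btrfs subvolume list /var/lib/rancher/k3s/agent/containerd/io.containerd.snapshotter.v1.btrfs'
```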