mirror of
https://github.com/k3s-io/k3s.git
synced 2024-06-07 19:41:36 +00:00
f8b661d590
* Update to v1.26.0-alpha.2 Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * go generate Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Default CURRENT_VERSION to VERSION_TAG for alpha versions Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * remove containerd package Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Update k8s to v1.26.0-rc.0-k3s1 cri-tools cri-dockerd and cadvisor Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * replace cri-api reference to the new api Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * go mod tidy Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fix version script to allow rc and alphas Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fix version script to allow rc and alphas Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Fix version script to allow rc and alphas Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Update to Kubernetes 1.26.0-rc.1 Signed-off-by: Brad Davidson <brad.davidson@rancher.com> * Undo helm-controller pin Signed-off-by: Brad Davidson <brad.davidson@rancher.com> * Bump containerd to -k3s2 for stargz fix Signed-off-by: Brad Davidson <brad.davidson@rancher.com> * DevicePlugins featuregate is locked to on Signed-off-by: Brad Davidson <brad.davidson@rancher.com> * Bump kine for DeleteRange fix Signed-off-by: Brad Davidson <brad.davidson@rancher.com> * Update to v1.26.0-k3s1 Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * go mod tidy Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * Bring back snapshotter checks and update golang to 1.19.4 Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> * fix windows containerd snapshotter checks Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com> Signed-off-by: Brad Davidson 
<brad.davidson@rancher.com> Co-authored-by: Brad Davidson <brad.davidson@rancher.com>
87 lines
2.8 KiB
Bash
Executable File
87 lines
2.8 KiB
Bash
Executable File
#!/bin/bash

# Services that must be Ready before the test harness considers the cluster up.
all_services=(
    coredns
    local-path-provisioner
    metrics-server
    traefik
)

export NUM_SERVERS=1
export NUM_AGENTS=1
# Join the array with spaces into a scalar; [*] is the correct expansion for
# a string assignment (SC2124) — [@] in this position gives the same result
# only by accident of default IFS.
export WAIT_SERVICES="${all_services[*]}"
|
|
|
|
REPO=${REPO:-rancher}
IMAGE_NAME=${IMAGE_NAME:-k3s}

# Derive the current release channel (e.g. "v1.26") from the kubernetes
# dependency pinned in go.mod.
CURRENT_CHANNEL=$(grep 'k8s.io/kubernetes v' go.mod | head -n1 | awk '{print $2}' | awk -F. '{print "v1." $2}')

# Resolve the channel to its latest released tag by following the update
# service redirect; '+' in the tag is rewritten to '-' to form a valid docker
# image tag. NOTE: gensub() is GNU awk only. URL var quoted (SC2086).
CURRENT_VERSION=$(curl -s "https://update.k3s.io/v1-release/channels/${CURRENT_CHANNEL}" -o /dev/null -w '%{redirect_url}' | awk -F/ '{print gensub(/\+/, "-", "g", $NF)}')

# Alpha/RC lines may have no channel yet — fall back to the tag for this build.
if [ -z "${CURRENT_VERSION}" ]; then
    CURRENT_VERSION=${VERSION_TAG}
fi

# Start both server and agent from the latest released image; start-test later
# unsets these so re-provisioning uses the image from the current build.
export K3S_IMAGE_SERVER=${REPO}/${IMAGE_NAME}:${CURRENT_VERSION}${SUFFIX}
export K3S_IMAGE_AGENT=${REPO}/${IMAGE_NAME}:${CURRENT_VERSION}${SUFFIX}
|
|
|
|
# Arguments: $1 - server index
# Mounts per-test named volumes for the server's datastore and config so they
# survive when the container is deleted and re-provisioned mid-test.
server-pre-hook(){
  local testID
  # Declaration split from assignment so a basename failure isn't masked (SC2155);
  # ${testID,,} lowercases, since docker volume names are matched case-sensitively.
  testID=$(basename "$TEST_DIR")
  export SERVER_DOCKER_ARGS="\
--mount type=volume,src=k3s-server-$1-${testID,,}-rancher,dst=/var/lib/rancher/k3s \
--mount type=volume,src=k3s-server-$1-${testID,,}-etc,dst=/etc/rancher"
}
export -f server-pre-hook
|
|
|
|
# Arguments: $1 - agent index
# Mounts per-test named volumes for the agent's datastore and config so they
# survive when the container is deleted and re-provisioned mid-test.
agent-pre-hook(){
  local testID
  # Declaration split from assignment so a basename failure isn't masked (SC2155);
  # ${testID,,} lowercases, since docker volume names are matched case-sensitively.
  testID=$(basename "$TEST_DIR")
  export AGENT_DOCKER_ARGS="\
--mount type=volume,src=k3s-agent-$1-${testID,,}-rancher,dst=/var/lib/rancher/k3s \
--mount type=volume,src=k3s-agent-$1-${testID,,}-etc,dst=/etc/rancher"
}
export -f agent-pre-hook
|
|
|
|
# Upgrade test body: boot on the released image, then swap the cluster to the
# current build while keeping the datastore volumes, and verify state survived.
# All expansions quoted (SC2086/SC2046) — test IDs and paths stay intact even
# if TEST_DIR ever contains unusual characters.
start-test() {
  # Create a pod and print the version before upgrading
  kubectl get node -o wide
  kubectl create -f scripts/airgap/volume-test.yaml

  # Add post-hook sleeps to give the kubelet time to update the version after startup
  server-post-hook(){
    sleep 15
  }
  export -f server-post-hook
  agent-post-hook(){
    sleep 15
  }
  export -f agent-post-hook

  # Switch the image back to the current build, delete the node containers,
  # and re-provision with the same datastore volumes
  unset K3S_IMAGE_SERVER
  unset K3S_IMAGE_AGENT
  if [ "$NUM_AGENTS" -gt 0 ]; then
    for i in $(seq 1 "$NUM_AGENTS"); do
      docker rm -f -v "$(cat "$TEST_DIR/agents/$i/metadata/name")"
      rm -rf "$TEST_DIR/agents/$i"
    done
  fi
  for i in $(seq 1 "$NUM_SERVERS"); do
    docker rm -f -v "$(cat "$TEST_DIR/servers/$i/metadata/name")"
    rm -rf "$TEST_DIR/servers/$i"
  done
  provision-cluster

  # Confirm that the nodes are running the current build and that the pod we
  # created earlier is still there. version.sh is tolerated failing so a dirty
  # tree doesn't abort the test before the grep makes the real determination.
  . ./scripts/version.sh || true
  kubectl get node -o wide | grep -F "$VERSION"
  kubectl get pod -n kube-system volume-test -o wide
}
export -f start-test
|
|
|
|
# Remove every docker volume created for this test run (matched by the
# lowercased test ID embedded in the volume names by the pre-hooks).
test-cleanup-hook(){
  local testID
  # Split declaration from assignment so a basename failure isn't masked (SC2155).
  testID=$(basename "$TEST_DIR")
  # -F: the ID is a literal, not a pattern; xargs -r: no-op when nothing matches.
  docker volume ls -q | grep -F "${testID,,}" | xargs -r docker volume rm
}
export -f test-cleanup-hook
|
|
|
|
# --- create a single-node cluster from the latest release, then restart the containers with the current build
# run-test and cleanup-test-env are provided by the harness that sources this
# file — presumably the docker test runner scripts; confirm against the caller.
# LABEL tags this run's output/artifacts as the UPGRADE scenario.
LABEL=UPGRADE run-test

cleanup-test-env
|