Merge branch 'master' into selinux-install

This commit is contained in:
Erik Wilson 2020-03-13 09:51:36 -07:00 committed by GitHub
commit 5753dbd6ae
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
61 changed files with 1025 additions and 265 deletions

View File

@ -1,12 +1,7 @@
./bin
./etc
./build/data
./build/data.tar.gz
./pkg/data/zz_generated_bindata.go
./package/data.tar.gz
./.vagrant
./.cache
./.dapper
./data-dir
./dist
./.trash-cache

View File

@ -59,6 +59,31 @@ steps:
event:
- tag
- name: rpm-publish
image: centos:7
environment:
PRIVATE_KEY:
from_secret: private_key
PRIVATE_KEY_PASS_PHRASE:
from_secret: private_key_pass_phrase
AWS_S3_BUCKET:
from_secret: aws_s3_bucket
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
commands:
- scripts/provision/generic/centos7/yum-install-rpm-tools
- scripts/package-rpm
when:
instance:
- drone-publish.rancher.io
ref:
- refs/heads/master
- refs/tags/*
event:
- tag
- name: test
image: rancher/dapper:v0.4.2
secrets: [ gcloud_auth ]
@ -154,6 +179,31 @@ steps:
event:
- tag
- name: rpm-publish
image: centos:7
environment:
PRIVATE_KEY:
from_secret: private_key
PRIVATE_KEY_PASS_PHRASE:
from_secret: private_key_pass_phrase
AWS_S3_BUCKET:
from_secret: aws_s3_bucket
AWS_ACCESS_KEY_ID:
from_secret: aws_access_key_id
AWS_SECRET_ACCESS_KEY:
from_secret: aws_secret_access_key
commands:
- scripts/provision/generic/centos7/yum-install-rpm-tools
- scripts/package-rpm
when:
instance:
- drone-publish.rancher.io
ref:
- refs/heads/master
- refs/tags/*
event:
- tag
- name: test
image: rancher/dapper:v0.4.2
secrets: [ gcloud_auth ]
@ -273,16 +323,56 @@ steps:
- DOCKER_USERNAME
- DOCKER_PASSWORD
- DRONE_TAG
when:
instance:
- drone-publish.rancher.io
ref:
- refs/heads/master
- refs/tags/*
event:
- tag
trigger:
instance:
- drone-publish.rancher.io
ref:
- refs/heads/master
- refs/tags/*
event:
- tag
depends_on:
- amd64
- arm64
- arm
---
kind: pipeline
name: dispatch
platform:
os: linux
arch: amd64
steps:
- name: dispatch
image: rancher/dapper:v0.4.2
environment:
PAT_USERNAME:
from_secret: pat_username
PAT_TOKEN:
from_secret: github_token
commands:
- dapper dispatch
volumes:
- name: docker
path: /var/run/docker.sock
trigger:
instance:
- drone-publish.rancher.io
ref:
- refs/heads/master
- refs/tags/*
event:
- tag
volumes:
- name: docker
host:
path: /var/run/docker.sock
depends_on:
- manifest

1
.gitignore vendored
View File

@ -28,3 +28,4 @@ __pycache__
/tests/.tox/
/tests/.vscode
/sonobuoy-output
*.tmp

View File

@ -1,13 +1,13 @@
FROM golang:1.13.8-alpine3.10
RUN apk -U --no-cache add bash git gcc musl-dev docker vim less file curl wget ca-certificates jq linux-headers zlib-dev tar zip squashfs-tools npm coreutils \
python2 python3 py3-pip python3-dev openssl-dev libffi-dev libseccomp libseccomp-dev make libuv-static
RUN pip3 install 'tox==3.6.0'
RUN apk -U --no-cache --repository http://dl-3.alpinelinux.org/alpine/edge/main/ add sqlite-dev sqlite-static
python2 openssl-dev libffi-dev libseccomp libseccomp-dev make libuv-static
RUN apk -U --no-cache --repository http://dl-3.alpinelinux.org/alpine/edge/main/ add sqlite-dev sqlite-static libselinux libselinux-dev
RUN mkdir -p /go/src/golang.org/x && \
cd /go/src/golang.org/x && git clone https://github.com/golang/tools && \
git -C /go/src/golang.org/x/tools checkout -b current aa82965741a9fecd12b026fbb3d3c6ed3231b8f8 && \
go install golang.org/x/tools/cmd/goimports
cd /go/src/golang.org/x && git clone https://github.com/golang/tools && cd tools && \
git checkout -b current aa82965741a9fecd12b026fbb3d3c6ed3231b8f8 && \
go install golang.org/x/tools/cmd/goimports && cd
RUN rm -rf /go/src /go/pkg
ARG DAPPER_HOST_ARCH
@ -17,6 +17,9 @@ RUN if [ "${ARCH}" == "amd64" ]; then \
curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.15.0; \
fi
ARG SELINUX=true
ENV SELINUX $SELINUX
ARG DQLITE=true
ENV DQLITE $DQLITE
COPY --from=rancher/dqlite-build:v1.3.1-r1 /dist/artifacts /usr/src/

72
Vagrantfile vendored
View File

@ -1,12 +1,13 @@
BOX = "generic/alpine310"
OS = (ENV['OS'] || "alpine310")
BOX_REPO = (ENV['BOX_REPO'] || "generic")
HOME = File.dirname(__FILE__)
PROJECT = File.basename(HOME)
MOUNT_TYPE = ENV['MOUNT_TYPE'] || "nfs"
NUM_NODES = (ENV['NUM_NODES'] || 0).to_i
NODE_CPUS = (ENV['NODE_CPUS'] || 4).to_i
NODE_MEMORY = (ENV['NODE_MEMORY'] || 8192).to_i
NETWORK_PREFIX = ENV['NETWORK_PREFIX'] || "10.135.135"
VAGRANT_PROVISION = ENV['VAGRANT_PROVISION'] || "./scripts/vagrant-provision"
VAGRANT_PROVISION = ENV['VAGRANT_PROVISION'] || "./scripts/provision/vagrant"
MOUNT_TYPE = ENV['MOUNT_TYPE'] || "nfs"
# --- Rules for /etc/sudoers to avoid password entry configuring NFS:
# %admin ALL = (root) NOPASSWD: /usr/bin/sed -E -e * -ibak /etc/exports
@ -14,61 +15,36 @@ VAGRANT_PROVISION = ENV['VAGRANT_PROVISION'] || "./scripts/vagrant-provision"
# %admin ALL = (root) NOPASSWD: /sbin/nfsd restart
# --- May need to add terminal to System Preferences -> Security & Privacy -> Privacy -> Full Disk Access
# --- Check for missing plugins
required_plugins = %w( vagrant-alpine vagrant-timezone )
plugin_installed = false
required_plugins.each do |plugin|
unless Vagrant.has_plugin?(plugin)
system "vagrant plugin install #{plugin}"
plugin_installed = true
end
def provision(vm, node_num)
node_os = (ENV["OS_#{node_num}"] || OS)
vm.box = (ENV["BOX_#{node_num}"] || ENV["BOX"] || "#{BOX_REPO}/#{node_os}")
vm.hostname = "#{PROJECT}-#{node_num}-#{node_os}"
vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+node_num}"
vm.provision "shell",
path: VAGRANT_PROVISION,
env: { 'HOME' => HOME, 'GOPATH' => ENV['GOPATH'], 'BOX' => vm.box }
end
# --- If new plugins installed, restart Vagrant process
if plugin_installed === true
exec "vagrant #{ARGV.join' '}"
end
provision = <<SCRIPT
# --- Use system gopath if available
export GOPATH=#{ENV['GOPATH']}
# --- Default to root user for vagrant ssh
cat <<\\EOF >/etc/profile.d/root.sh
[ $EUID -ne 0 ] && exec sudo -i
EOF
# --- Set home to current directory
cat <<\\EOF >/etc/profile.d/home.sh
export HOME="#{HOME}" && cd
EOF
. /etc/profile.d/home.sh
# --- Run vagrant provision script if available
if [ ! -x #{VAGRANT_PROVISION} ]; then
echo 'WARNING: Unable to execute provision script "#{VAGRANT_PROVISION}"'
exit
fi
echo "running '#{VAGRANT_PROVISION}'..." && \
#{VAGRANT_PROVISION} && \
echo "finished '#{VAGRANT_PROVISION}'!"
SCRIPT
Vagrant.configure("2") do |config|
config.vm.provider "virtualbox" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
v.customize ["modifyvm", :id, "--audio", "none"]
end
config.vm.box = BOX
config.vm.hostname = PROJECT
if Vagrant.has_plugin?("vagrant-timezone")
config.timezone.value = :host
end
config.vm.synced_folder ".", HOME, type: MOUNT_TYPE
config.vm.provision "shell", inline: provision
config.timezone.value = :host
config.vm.network "private_network", ip: "#{NETWORK_PREFIX}.100" if NUM_NODES==0
(1..NUM_NODES).each do |i|
config.vm.define ".#{i}" do |node|
node.vm.network "private_network", ip: "#{NETWORK_PREFIX}.#{100+i}"
node.vm.hostname = "#{PROJECT}-#{i}"
if NUM_NODES==0
provision(config.vm, 0)
else
(1..NUM_NODES).each do |i|
config.vm.define ".#{i}" do |node|
provision(node.vm, i)
end
end
end
end

View File

@ -2,7 +2,9 @@
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s server {{ extra_server_args | default("") }}
@ -12,6 +14,7 @@ LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=infinity
Restart=always
RestartSec=5s

View File

@ -2,7 +2,11 @@
Description=Lightweight Kubernetes
Documentation=https://k3s.io
After=network-online.target
[Service]
Type=notify
ExecStartPre=-/sbin/modprobe br_netfilter
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/k3s agent --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }}
KillMode=process
Delegate=yes
@ -10,6 +14,7 @@ LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
TimeoutStartSec=infinity
Restart=always
RestartSec=5s

6
go.mod
View File

@ -7,9 +7,9 @@ replace (
github.com/containerd/btrfs => github.com/containerd/btrfs v0.0.0-20181101203652-af5082808c83
github.com/containerd/cgroups => github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601
github.com/containerd/console => github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50
github.com/containerd/containerd => github.com/rancher/containerd v1.3.3-k3s1
github.com/containerd/containerd => github.com/rancher/containerd v1.3.3-k3s2
github.com/containerd/continuity => github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02
github.com/containerd/cri => github.com/rancher/cri v1.3.0-k3s.3
github.com/containerd/cri => github.com/rancher/cri v1.3.0-k3s.4
github.com/containerd/fifo => github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c
github.com/containerd/go-runc => github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda
github.com/containerd/typeurl => github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd
@ -94,6 +94,7 @@ require (
github.com/mattn/go-sqlite3 v1.13.0
github.com/natefinch/lumberjack v2.0.0+incompatible
github.com/opencontainers/runc v1.0.0-rc9
github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52
github.com/pkg/errors v0.8.1
github.com/rakelkar/gonetsh v0.0.0-20190719023240-501daadcadf8 // indirect
github.com/rancher/dynamiclistener v0.2.0
@ -122,6 +123,5 @@ require (
k8s.io/component-base v0.0.0
k8s.io/cri-api v0.0.0
k8s.io/klog v1.0.0
k8s.io/kubelet v0.0.0
k8s.io/kubernetes v1.16.0
)

8
go.sum
View File

@ -708,10 +708,10 @@ github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:
github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
github.com/rakelkar/gonetsh v0.0.0-20190719023240-501daadcadf8 h1:83l9gPhYtgxODlZKU0Odq4pQuDcMZEVgAh364+PV3OU=
github.com/rakelkar/gonetsh v0.0.0-20190719023240-501daadcadf8/go.mod h1:4XHkfaUj+URzGO9sohoAgt2V9Y8nIW7fugpu0E6gShk=
github.com/rancher/containerd v1.3.3-k3s1 h1:j8NGZdXKsZd2ne0XQg6OBfMJ/NkY/Qri6QhscGrJp2M=
github.com/rancher/containerd v1.3.3-k3s1/go.mod h1:ZMfzmqce2Z+QSEqdHMfeJs1TZ/UeJ1aDrazjpQT4ehM=
github.com/rancher/cri v1.3.0-k3s.3 h1:j/Sq2LMyg6gBn2MS1j5dEudpdL+UYVH7ubbewUCXkS0=
github.com/rancher/cri v1.3.0-k3s.3/go.mod h1:Ht5T1dIKzm+4NExmb7wDVG6qR+j0xeXIjjhCv1d9geY=
github.com/rancher/containerd v1.3.3-k3s2 h1:RZr+TqFt7+YsrSYkyytlhW4HmneWeFNM7IymNOoGW6A=
github.com/rancher/containerd v1.3.3-k3s2/go.mod h1:ZMfzmqce2Z+QSEqdHMfeJs1TZ/UeJ1aDrazjpQT4ehM=
github.com/rancher/cri v1.3.0-k3s.4 h1:BXER8109dxgNw4qq8HHOCJ+3sHO+9AA1cwZTOLlqoTo=
github.com/rancher/cri v1.3.0-k3s.4/go.mod h1:Ht5T1dIKzm+4NExmb7wDVG6qR+j0xeXIjjhCv1d9geY=
github.com/rancher/cri-tools v1.17.0-k3s1 h1:jfu97FowbraTDc7b6fxWtO+dq+DU2oW+ABBQSEFiRb0=
github.com/rancher/cri-tools v1.17.0-k3s1/go.mod h1:bRTZttsvk+nCG8tSFs8D6UUx8CkMXR5TAsRLS0fXAqI=
github.com/rancher/dynamiclistener v0.2.0 h1:KucYwJXVVGhZ/NndfMCeQoCafT/VN7kvqSGgmlX8Lxk=

View File

@ -26,6 +26,9 @@ set -e
# If set to 'skip' will not create symlinks, 'force' will overwrite,
# default will symlink if command does not exist in path.
#
# - INSTALL_K3S_SKIP_ENABLE
# If set to true will not enable or start k3s service.
#
# - INSTALL_K3S_SKIP_START
# If set to true will not start k3s service.
#
@ -166,11 +169,6 @@ setup_env() {
${invalid_chars}"
fi
# --- set related files from system name ---
SERVICE_K3S=${SYSTEM_NAME}.service
UNINSTALL_K3S_SH=${SYSTEM_NAME}-uninstall.sh
KILLALL_K3S_SH=k3s-killall.sh
# --- use sudo if we are not already root ---
SUDO=sudo
if [ $(id -u) -eq 0 ]; then
@ -202,6 +200,11 @@ setup_env() {
SYSTEMD_DIR=/etc/systemd/system
fi
# --- set related files from system name ---
SERVICE_K3S=${SYSTEM_NAME}.service
UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh}
KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh}
# --- use service or environment location depending on systemd/openrc ---
if [ "${HAS_SYSTEMD}" = true ]; then
FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S}
@ -389,7 +392,7 @@ setup_binary() {
if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s 2>/dev/null 2>&1; then
if $SUDO grep SELINUX=enforcing /etc/selinux/config >/dev/null 2>&1; then
fatal "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, please install k3s_selinux RPM"
fatal "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, please install k3s-selinux RPM"
fi
fi
}
@ -425,7 +428,7 @@ create_symlinks() {
for cmd in kubectl crictl ctr; do
if [ ! -e ${BIN_DIR}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
which_cmd=$(which ${cmd} || true)
which_cmd=$(which ${cmd} 2>/dev/null || true)
if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
info "Creating ${BIN_DIR}/${cmd} symlink to k3s"
$SUDO ln -sf k3s ${BIN_DIR}/${cmd}
@ -441,13 +444,13 @@ create_symlinks() {
# --- create killall script ---
create_killall() {
[ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
info "Creating killall script ${BIN_DIR}/${KILLALL_K3S_SH}"
$SUDO tee ${BIN_DIR}/${KILLALL_K3S_SH} >/dev/null << \EOF
info "Creating killall script ${KILLALL_K3S_SH}"
$SUDO tee ${KILLALL_K3S_SH} >/dev/null << \EOF
#!/bin/sh
[ $(id -u) -eq 0 ] || exec sudo $0 $@
for bin in /var/lib/rancher/k3s/data/**/bin/; do
[ -d $bin ] && export PATH=$bin:$PATH
[ -d $bin ] && export PATH=$PATH:$bin:$bin/aux
done
set -x
@ -485,7 +488,7 @@ killtree() {
}
getshims() {
lsof | sed -e 's/^[^0-9]*//g; s/ */\t/g' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1 | sort -n -u
ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
}
killtree $({ set +x; } 2>/dev/null; getshims; set -x)
@ -520,20 +523,20 @@ ip link delete flannel.1
rm -rf /var/lib/cni/
iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore
EOF
$SUDO chmod 755 ${BIN_DIR}/${KILLALL_K3S_SH}
$SUDO chown root:root ${BIN_DIR}/${KILLALL_K3S_SH}
$SUDO chmod 755 ${KILLALL_K3S_SH}
$SUDO chown root:root ${KILLALL_K3S_SH}
}
# --- create uninstall script ---
create_uninstall() {
[ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
info "Creating uninstall script ${BIN_DIR}/${UNINSTALL_K3S_SH}"
$SUDO tee ${BIN_DIR}/${UNINSTALL_K3S_SH} >/dev/null << EOF
info "Creating uninstall script ${UNINSTALL_K3S_SH}"
$SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF
#!/bin/sh
set -x
[ \$(id -u) -eq 0 ] || exec sudo \$0 \$@
${BIN_DIR}/${KILLALL_K3S_SH}
${KILLALL_K3S_SH}
if which systemctl; then
systemctl disable ${SYSTEM_NAME}
@ -548,7 +551,7 @@ rm -f ${FILE_K3S_SERVICE}
rm -f ${FILE_K3S_ENV}
remove_uninstall() {
rm -f ${BIN_DIR}/${UNINSTALL_K3S_SH}
rm -f ${UNINSTALL_K3S_SH}
}
trap remove_uninstall EXIT
@ -567,10 +570,10 @@ rm -rf /etc/rancher/k3s
rm -rf /var/lib/rancher/k3s
rm -rf /var/lib/kubelet
rm -f ${BIN_DIR}/k3s
rm -f ${BIN_DIR}/${KILLALL_K3S_SH}
rm -f ${KILLALL_K3S_SH}
EOF
$SUDO chmod 755 ${BIN_DIR}/${UNINSTALL_K3S_SH}
$SUDO chown root:root ${BIN_DIR}/${UNINSTALL_K3S_SH}
$SUDO chmod 755 ${UNINSTALL_K3S_SH}
$SUDO chown root:root ${UNINSTALL_K3S_SH}
}
# --- disable current service if loaded --
@ -704,6 +707,8 @@ openrc_start() {
# --- startup systemd or openrc service ---
service_enable_and_start() {
[ "${INSTALL_K3S_SKIP_ENABLE}" = true ] && return
[ "${HAS_SYSTEMD}" = true ] && systemd_enable
[ "${HAS_OPENRC}" = true ] && openrc_enable

57
package/k3s.spec Normal file
View File

@ -0,0 +1,57 @@
# vim: sw=4:ts=4:et
# Spec for the k3s RPM: packages the k3s binary together with the
# install.sh helper, and drives install/uninstall through that script so
# RPM installs behave the same as the curl | sh install path.
%define install_path /usr/bin
%define util_path %{_datadir}/k3s
%define install_sh %{util_path}/.install.sh
%define uninstall_sh %{util_path}/.uninstall.sh
Name: k3s
Version: %{k3s_version}
Release: %{k3s_release}%{?dist}
Summary: Lightweight Kubernetes
Group: System Environment/Base
License: ASL 2.0
URL: http://k3s.io
BuildRequires: systemd
# k3s-selinux policy must already be installed before the post scriptlet
# runs install.sh (which applies the SELinux context to the binary)
Requires(post): k3s-selinux >= %{k3s_policyver}
%description
The certified Kubernetes distribution built for IoT & Edge computing.
%install
install -d %{buildroot}%{install_path}
install dist/artifacts/%{k3s_binary} %{buildroot}%{install_path}/k3s
install -d %{buildroot}%{util_path}
install install.sh %{buildroot}%{install_sh}
%post
# do not run install script on upgrade
# (scriptlet arg $1 is the installed-instance count: 1 = fresh install,
#  2 or more = upgrade)
echo post-install args: $@
if [ $1 == 1 ]; then
    INSTALL_K3S_BIN_DIR=%{install_path} \
    INSTALL_K3S_SKIP_DOWNLOAD=true \
    INSTALL_K3S_SKIP_ENABLE=true \
    UNINSTALL_K3S_SH=%{uninstall_sh} \
    %{install_sh}
fi
%systemd_post k3s.service
exit 0
%postun
echo post-uninstall args: $@
# do not run uninstall script on upgrade
# ($1 is the remaining instance count after this transaction: 0 = full removal)
if [ $1 == 0 ]; then
    %{uninstall_sh}
    rm -rf %{util_path}
fi
exit 0
%files
%{install_path}/k3s
%{install_sh}
%changelog
* Mon Mar 2 2020 Erik Wilson <erik@rancher.com> 0.1-1
- Initial version

View File

@ -397,6 +397,7 @@ func get(envInfo *cmds.Agent) (*config.Node, error) {
nodeConfig := &config.Node{
Docker: envInfo.Docker,
DisableSELinux: envInfo.DisableSELinux,
ContainerRuntimeEndpoint: envInfo.ContainerRuntimeEndpoint,
FlannelBackend: controlConfig.FlannelBackend,
}
@ -474,6 +475,7 @@ func get(envInfo *cmds.Agent) (*config.Node, error) {
nodeConfig.AgentConfig.DisableCCM = controlConfig.DisableCCM
nodeConfig.AgentConfig.DisableNPC = controlConfig.DisableNPC
nodeConfig.AgentConfig.Rootless = envInfo.Rootless
nodeConfig.DisableSELinux = envInfo.DisableSELinux
return nodeConfig, nil
}

View File

@ -0,0 +1,15 @@
// +build !windows
package containerd
import (
"os/exec"
"syscall"
)
// addDeathSig arranges for the spawned process to receive SIGKILL when
// its parent dies, so the child cannot outlive the k3s process that
// started it.
func addDeathSig(cmd *exec.Cmd) {
	// Pdeathsig delivers the signal on parent-thread death; this is a
	// Linux-specific SysProcAttr field — NOTE(review): the original
	// comment said "not supported in this OS", apparently copy-pasted
	// from the windows stub; this build-tagged (!windows) variant DOES
	// set the death signal.
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Pdeathsig: syscall.SIGKILL,
	}
}

View File

@ -0,0 +1,7 @@
package containerd
import "os/exec"
// addDeathSig is a no-op on this OS: a parent-death signal (Pdeathsig)
// is not available outside Linux.
func addDeathSig(_ *exec.Cmd) {
	// not supported in this OS
}

View File

@ -8,13 +8,13 @@ import (
"os"
"os/exec"
"path/filepath"
"syscall"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/namespaces"
"github.com/natefinch/lumberjack"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/agent/templates"
util2 "github.com/rancher/k3s/pkg/agent/util"
"github.com/rancher/k3s/pkg/daemons/config"
@ -66,9 +66,7 @@ func Run(ctx context.Context, cfg *config.Node) error {
cmd := exec.Command(args[0], args[1:]...)
cmd.Stdout = stdOut
cmd.Stderr = stdErr
cmd.SysProcAttr = &syscall.SysProcAttr{
Pdeathsig: syscall.SIGKILL,
}
addDeathSig(cmd)
if err := cmd.Run(); err != nil {
fmt.Fprintf(os.Stderr, "containerd: %s\n", err)
}
@ -170,6 +168,22 @@ func setupContainerdConfig(ctx context.Context, cfg *config.Node) error {
PrivateRegistryConfig: privRegistries,
}
selEnabled, selConfigured, err := selinuxStatus()
if err != nil {
return errors.Wrap(err, "failed to detect selinux")
}
if cfg.DisableSELinux {
containerdConfig.SELinuxEnabled = false
if selEnabled {
logrus.Warn("SELinux is enabled for system but has been disabled for containerd by override")
}
} else {
containerdConfig.SELinuxEnabled = selEnabled
}
if containerdConfig.SELinuxEnabled && !selConfigured {
logrus.Warnf("SELinux is enabled for k3s but process is not running in context '%s', k3s-selinux policy may need to be applied", SELinuxContextType)
}
containerdTemplateBytes, err := ioutil.ReadFile(cfg.Containerd.Template)
if err == nil {
logrus.Infof("Using containerd template at %s", cfg.Containerd.Template)

View File

@ -0,0 +1,27 @@
package containerd
import (
"github.com/opencontainers/selinux/go-selinux"
)
const (
SELinuxContextType = "container_runtime_t"
)
// selinuxStatus probes the host's SELinux state.  It returns
// (enabled, inExpectedContext, err): whether SELinux is enabled at all,
// and — when enabled — whether the current process is already running
// under the expected container-runtime domain (SELinuxContextType).
func selinuxStatus() (bool, bool, error) {
	if !selinux.GetEnabled() {
		// SELinux disabled entirely; context question is moot.
		return false, false, nil
	}
	// SELinux is on: inspect the label of the current process.
	label, err := selinux.CurrentLabel()
	if err != nil {
		return true, false, err
	}
	ctx, err := selinux.NewContext(label)
	if err != nil {
		return true, false, err
	}
	// Compare the process type field against container_runtime_t.
	return true, ctx["type"] == SELinuxContextType, nil
}

View File

@ -0,0 +1,10 @@
package netpol
import (
"context"
daemonconfig "github.com/rancher/k3s/pkg/daemons/config"
)
// Run is the Windows stub of the network-policy controller entry point.
// The kube-router based controller is not built for Windows, so reaching
// this always panics; callers must pass --disable-network-policy.
func Run(ctx context.Context, nodeConfig *daemonconfig.Node) error {
	panic("Netpol is not supported on windows ensure to pass --disable-network-policy")
}

View File

@ -1,3 +1,5 @@
// +build !windows
package netpol
import (

View File

@ -1,6 +1,8 @@
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
// - modified from https://github.com/cloudnativelabs/kube-router/blob/d6f9f31a7b/pkg/controllers/netpol/network_policy_controller.go
// +build !windows
package netpol
import (

View File

@ -1,6 +1,8 @@
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
// - modified from https://github.com/cloudnativelabs/kube-router/tree/d6f9f31a7b/pkg/utils
// +build !windows
package netpol
import (

View File

@ -1,3 +1,5 @@
// +build !windows
package syssetup
import (

View File

@ -0,0 +1,3 @@
package syssetup
// Configure is a no-op on Windows; the sysctl/kernel setup done by the
// non-windows variant does not apply here.
func Configure() {}

View File

@ -10,6 +10,7 @@ import (
type ContainerdConfig struct {
NodeConfig *config.Node
IsRunningInUserNS bool
SELinuxEnabled bool
PrivateRegistryConfig *Registry
}
@ -20,6 +21,7 @@ const ContainerdConfigTemplate = `
[plugins.cri]
stream_server_address = "127.0.0.1"
stream_server_port = "10010"
enable_selinux = {{ .SELinuxEnabled }}
{{- if .IsRunningInUserNS }}
disable_cgroup = true

View File

@ -28,6 +28,7 @@ type Agent struct {
Rootless bool
RootlessAlreadyUnshared bool
WithNodeID bool
DisableSELinux bool
AgentShared
ExtraKubeletArgs cli.StringSlice
ExtraKubeProxyArgs cli.StringSlice
@ -127,6 +128,12 @@ var (
Usage: "(agent/node) Registering and starting kubelet with set of labels",
Value: &AgentConfig.Labels,
}
DisableSELinuxFlag = cli.BoolFlag{
Name: "disable-selinux",
Usage: "(agent/node) Disable SELinux in containerd if currently enabled",
Hidden: true,
Destination: &AgentConfig.DisableSELinux,
}
)
func NewAgentCommand(action func(ctx *cli.Context) error) cli.Command {
@ -169,6 +176,7 @@ func NewAgentCommand(action func(ctx *cli.Context) error) cli.Command {
NodeLabels,
NodeTaints,
DockerFlag,
DisableSELinuxFlag,
CRIEndpointFlag,
PauseImageFlag,
PrivateRegistryFlag,

View File

@ -216,6 +216,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
NodeLabels,
NodeTaints,
DockerFlag,
DisableSELinuxFlag,
CRIEndpointFlag,
PauseImageFlag,
PrivateRegistryFlag,

View File

@ -25,6 +25,7 @@ type Node struct {
Docker bool
ContainerRuntimeEndpoint string
NoFlannel bool
DisableSELinux bool
FlannelBackend string
FlannelConf string
FlannelConfOverride bool

View File

@ -16,6 +16,7 @@ const (
NodeArgsAnnotation = "k3s.io/node-args"
NodeEnvAnnotation = "k3s.io/node-env"
NodeConfigHashAnnotation = "k3s.io/node-config-hash"
OmittedValue = "********"
)
func getNodeArgs() (string, error) {
@ -31,7 +32,7 @@ func getNodeArgs() (string, error) {
for i, arg := range nodeArgsList {
if isSecret(arg) {
if i+1 < len(nodeArgsList) {
nodeArgsList[i+1] = ""
nodeArgsList[i+1] = OmittedValue
}
}
}
@ -52,7 +53,7 @@ func getNodeEnv() (string, error) {
}
for key := range k3sEnv {
if isSecret(key) {
k3sEnv[key] = ""
k3sEnv[key] = OmittedValue
}
}
k3sEnvJSON, err := json.Marshal(k3sEnv)

View File

@ -8,6 +8,7 @@ import (
"strings"
"github.com/rancher/k3s/pkg/token"
"github.com/rancher/k3s/pkg/util"
)
type entry struct {
@ -141,7 +142,7 @@ func writePasswords(passwdFile string, records [][]string) error {
}
defer out.Close()
if err := out.Chmod(0600); err != nil {
if err := util.SetFileModeForFile(out, 0600); err != nil {
return err
}

View File

@ -1,3 +1,5 @@
// +build !windows
package rootless
import (

View File

@ -1,3 +1,5 @@
// +build !windows
package rootless
import (

View File

@ -0,0 +1,5 @@
package rootless
// Rootless is the Windows stub: rootless mode is not implemented on this
// platform, so any attempt to use it panics.
func Rootless(stateDir string) error {
	panic("Rootless not supported on windows")
}

View File

@ -1,3 +1,5 @@
// +build !windows
package rootlessports
import (

View File

@ -0,0 +1,10 @@
package rootlessports
import (
"context"
coreClients "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
)
// Register is the Windows stub of the rootless-ports controller; rootless
// mode is not implemented on this platform, so reaching it panics.
func Register(ctx context.Context, serviceController coreClients.ServiceController, httpsPort int) error {
	panic("Rootless is not supported on windows")
}

View File

@ -24,6 +24,7 @@ import (
"github.com/rancher/k3s/pkg/rootlessports"
"github.com/rancher/k3s/pkg/servicelb"
"github.com/rancher/k3s/pkg/static"
"github.com/rancher/k3s/pkg/util"
v1 "github.com/rancher/wrangler-api/pkg/generated/controllers/core/v1"
"github.com/rancher/wrangler/pkg/leader"
"github.com/rancher/wrangler/pkg/resolvehome"
@ -263,12 +264,12 @@ func writeKubeConfig(certs string, config *Config) error {
if config.ControlConfig.KubeConfigMode != "" {
mode, err := strconv.ParseInt(config.ControlConfig.KubeConfigMode, 8, 0)
if err == nil {
os.Chmod(kubeConfig, os.FileMode(mode))
util.SetFileModeForPath(kubeConfig, os.FileMode(mode))
} else {
logrus.Errorf("failed to set %s to mode %s: %v", kubeConfig, os.FileMode(mode), err)
}
} else {
os.Chmod(kubeConfig, os.FileMode(0600))
util.SetFileModeForPath(kubeConfig, os.FileMode(0600))
}
if kubeConfigSymlink != kubeConfig {

15
pkg/util/file.go Normal file
View File

@ -0,0 +1,15 @@
// +build !windows
package util
import (
"os"
)
// SetFileModeForPath sets the permission bits of the file at name.
// On non-windows platforms this is a straight os.Chmod.
func SetFileModeForPath(name string, mode os.FileMode) error {
	return os.Chmod(name, mode)
}
// SetFileModeForFile sets the permission bits of an already-open file via
// its handle (File.Chmod), avoiding a path re-lookup.
func SetFileModeForFile(file *os.File, mode os.FileMode) error {
	return file.Chmod(mode)
}

13
pkg/util/file_windows.go Normal file
View File

@ -0,0 +1,13 @@
package util
import (
"os"
)
// SetFileModeForPath is a no-op on Windows, where POSIX permission bits
// do not apply; it always succeeds.
func SetFileModeForPath(name string, mode os.FileMode) error {
	return nil
}
// SetFileModeForFile is a no-op on Windows, where POSIX permission bits
// do not apply; it always succeeds.
func SetFileModeForFile(file *os.File, mode os.FileMode) error {
	return nil
}

View File

@ -46,12 +46,20 @@ STATIC_SQLITE="
-extldflags '-static -lm -ldl -lz -lpthread $DQLITE_STATIC_SQLITE'
"
TAGS="ctrd apparmor seccomp no_btrfs netcgo osusergo providerless $DQLITE_TAGS"
RUNC_TAGS="apparmor seccomp"
RUNC_STATIC="static"
if [ "$SELINUX" = "true" ]; then
TAGS="$TAGS selinux"
RUNC_TAGS="$RUNC_TAGS selinux"
fi
if [ "$STATIC_BUILD" != "true" ]; then
STATIC="
"
STATIC_SQLITE="
"
RUNC_STATIC=""
else
TAGS="static_build libsqlite3 $TAGS"
fi
@ -109,13 +117,16 @@ ln -s containerd ./bin/ctr
# echo Building containerd
# CGO_ENABLED=0 go build -tags "$TAGS" -ldflags "$VERSIONFLAGS $LDFLAGS $STATIC" -o bin/containerd ./cmd/containerd/
echo Building runc
make EXTRA_LDFLAGS="-w -s" BUILDTAGS="apparmor seccomp" -C ./vendor/github.com/opencontainers/runc static
rm -f ./vendor/github.com/opencontainers/runc/runc
make EXTRA_LDFLAGS="-w -s" BUILDTAGS="$RUNC_TAGS" -C ./vendor/github.com/opencontainers/runc $RUNC_STATIC
cp -f ./vendor/github.com/opencontainers/runc/runc ./bin/runc
echo Building containerd-shim
rm -f ./vendor/github.com/containerd/containerd/bin/containerd-shim
make -C ./vendor/github.com/containerd/containerd bin/containerd-shim
cp -f ./vendor/github.com/containerd/containerd/bin/containerd-shim ./bin/containerd-shim
echo Building containerd-shim-runc-v2
rm -f ./vendor/github.com/containerd/containerd/bin/containerd-shim-runc-v2
make -C ./vendor/github.com/containerd/containerd bin/containerd-shim-runc-v2
cp -f ./vendor/github.com/containerd/containerd/bin/containerd-shim-runc-v2 ./bin/containerd-shim-runc-v2

11
scripts/dispatch Executable file
View File

@ -0,0 +1,11 @@
#!/bin/sh
set -e
set -x

REPO="https://api.github.com/repos/rancher/k3s-upgrade/dispatches"

# send dispatch event to REPO so k3s-upgrade builds for the tag that
# triggered this pipeline.
# --fail makes curl exit non-zero on HTTP 4xx/5xx; without it curl
# returns 0 on an error response and `set -e` never notices a rejected
# dispatch (bad credentials, wrong repo, etc).
curl --fail -XPOST -u "${PAT_USERNAME}:${PAT_TOKEN}" \
  -H "Accept: application/vnd.github.everest-preview+json" \
  -H "Content-Type: application/json" $REPO \
  --data '{"event_type": "create_tag", "client_payload": {"tag":"'"$DRONE_TAG"'"}}'

78
scripts/package-rpm Executable file
View File

@ -0,0 +1,78 @@
#!/bin/bash
set -e -x
cd $(dirname $0)/..
ARCH=${DRONE_STAGE_ARCH:-$(arch)}
. ./scripts/version.sh
if [[ ! "$VERSION" =~ ^v[0-9]+\.[0-9]+\.[0-9]+(\-[^\+]*)?\+k3s.+$ ]]; then
echo "k3s version $VERSION does not match regex for rpm upload"
exit 0
fi
TMPDIR=$(mktemp -d)
cleanup() {
exit_code=$?
trap - EXIT INT
rm -rf ${TMPDIR}
exit ${exit_code}
}
trap cleanup EXIT INT
export HOME=${TMPDIR}
BIN_SUFFIX=""
if [ ${ARCH} = aarch64 ] || [ ${ARCH} = arm64 ]; then
BIN_SUFFIX="-arm64"
elif [ ${ARCH} = armv7l ] || [ ${ARCH} = arm ]; then
BIN_SUFFIX="-armhf"
fi
# capture version of k3s
k3s_version=$(sed -E -e 's/^v([^-+]*).*$/\1/' <<< $VERSION)
# capture pre-release and metadata information of k3s
k3s_release=$(sed -E -e 's/\+k3s/+/; s/\+/-/g; s/^[^-]*//; s/^--/dev-/; s/-+/./g; s/^\.+//; s/\.+$//;' <<< $VERSION)
# k3s-selinux policy version needed for functionality
k3s_policyver=0.1-1
rpmbuild \
--define "k3s_version ${k3s_version}" \
--define "k3s_release ${k3s_release}" \
--define "k3s_policyver ${k3s_policyver}" \
--define "k3s_binary k3s${BIN_SUFFIX}" \
--define "_sourcedir ${PWD}" \
--define "_specdir ${PWD}" \
--define "_builddir ${PWD}" \
--define "_srcrpmdir ${PWD}" \
--define "_rpmdir ${PWD}/dist/rpm" \
--define "_buildrootdir ${PWD}/.rpm-build" \
-bb package/k3s.spec
if ! grep "BEGIN PGP PRIVATE KEY BLOCK" <<<"$PRIVATE_KEY"; then
echo "PRIVATE_KEY not defined, skipping rpm sign and upload"
exit 0
fi
cat <<\EOF >~/.rpmmacros
%_signature gpg
%_gpg_name ci@rancher.com
EOF
gpg --import - <<<"$PRIVATE_KEY"
expect <<EOF
set timeout 60
spawn sh -c "rpmsign --addsign dist/rpm/**/k3s-*.rpm"
expect "Enter pass phrase:"
send -- "$PRIVATE_KEY_PASS_PHRASE\r"
expect eof
lassign [wait] _ _ _ code
exit \$code
EOF
if [ -z "$AWS_S3_BUCKET" ]; then
echo "AWS_S3_BUCKET skipping rpm upload"
exit 0
fi
rpm-s3 --bucket $AWS_S3_BUCKET dist/rpm/**/k3s-*.rpm

View File

@ -0,0 +1,31 @@
#!/bin/bash
set -ve
apk add -q -f curl libc6-compat tzdata
download_go
download_dqlite
# ---
cat <<EOF >/etc/profile.d/build.sh
export SELINUX=true
export DQLITE=true
export STATIC_BUILD=true
EOF
. /etc/profile.d/build.sh
# ---
. /tmp/docker-run
# ---
go get -u github.com/go-delve/delve/cmd/dlv
# ---
cat <<EOF >/etc/profile.d/docker.sh
export DOCKER_HOST=tcp://10.0.2.2:2375
EOF
. /etc/profile.d/docker.sh
# ---
(
if ! docker version --format '{{.Server.Version}}' >/tmp/docker-server-version; then
echo "WARNING: Unable to connect to the docker socket, to enable docker in vagrant run the following command on the host:"
echo "docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 127.0.0.1:2375:2375 alpine/socat TCP-LISTEN:2375,fork UNIX-CONNECT:/var/run/docker.sock"
else
echo "Using host docker server v$(cat /tmp/docker-server-version)"
fi
)

View File

@ -0,0 +1,37 @@
#!/bin/bash
set -e -x
TMPDIR=$(mktemp -d)
cleanup() {
exit_code=$?
trap - EXIT INT
rm -rf ${TMPDIR}
exit ${exit_code}
}
trap cleanup EXIT INT
export HOME=${TMPDIR}
gpg --batch --gen-key - <<EOF
%echo Generating a default key
Key-Type: default
Subkey-Type: default
Name-Real: Rancher
Name-Comment: CI
Name-Email: ci@rancher.com
Expire-Date: 0
# Key-Length: 4096
# Subkey-Length: 4096
Passphrase: $PRIVATE_KEY_PASS_PHRASE
# %no-protection
# %no-ask-passphrase
# Do a commit here, so that we can later print "done" :-)
%commit
%echo done
EOF
gpg --armor --export ci@rancher.com >public.key
gpg --armor --export-secret-key ci@rancher.com >private.key

View File

@ -0,0 +1,46 @@
#!/bin/bash
set -ve
download_go
# ---
cat <<EOF >/etc/profile.d/build.sh
export SELINUX=true
# export DQLITE=true
# export STATIC_BUILD=true
EOF
. /etc/profile.d/build.sh
# ---
sed -E 's|apk( -U)?( --no-cache)?( --repository [^ ]*)? add|yum install -y|g' -i /tmp/docker-run
sed -E 's/-dev/-devel/g' -i /tmp/docker-run
. /tmp/docker-run
# ---
go get -u github.com/go-delve/delve/cmd/dlv
# ---
# docker install instructions slightly changed from https://kubernetes.io/docs/setup/production-environment/container-runtimes/
# default "exec-opts": ["native.cgroupdriver=cgroupfs"], and set "selinux-enabled": true
yum remove -y docker docker-common
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum update -y && yum install -y containerd.io-1.2.10 docker-ce-19.03.4 docker-ce-cli-19.03.4
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"selinux-enabled": true,
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
# ---
systemctl daemon-reload
systemctl disable --now firewalld
systemctl disable --now docker
# ---
# set selinux to permissive for dev & testing purposes only
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

View File

@ -0,0 +1,7 @@
#!/bin/bash
# Install the tooling needed to build, sign, and publish RPMs to S3.
set -e -x
# epel-release is installed here so the follow-up installs can resolve
# packages from EPEL (python2-pip appears to come from there — confirm).
yum install -y git expect yum-utils rpm-build rpm-sign python-deltarpm epel-release
yum install -y python2-pip
# rpm-s3 pushes a signed yum repo to an S3 bucket; pinned to an exact commit.
pip install git+git://github.com/Voronenko/rpm-s3.git@5695c6ad9a08548141d3713328e1bd3f533d137e

View File

@ -0,0 +1,26 @@
#!/bin/bash
# Provision an Ubuntu vagrant box for k3s development: go toolchain,
# native build dependencies, and the Dockerfile's RUN commands.
set -ve
# download_go is provided by the wrapper provision script that sources this file.
download_go
# ---
# Default build flags for login shells: build with SELinux support.
cat <<EOF >/etc/profile.d/build.sh
export SELINUX=true
# export DQLITE=true
# export STATIC_BUILD=true
EOF
. /etc/profile.d/build.sh
# ---
# Native build deps for CGO (seccomp, sqlite, selinux).
# NOTE: no trailing backslash after the last package — the original
# continuation swallowed the following "# ---" line into this command.
apt-get install -y \
    build-essential \
    pkg-config \
    libseccomp-dev \
    libsqlite3-dev \
    libselinux1-dev \
    zlib1g-dev
# ---
# Rewrite the alpine package commands extracted from Dockerfile.dapper
# (in /tmp/docker-run) for apt-get, then execute them.
sed -E 's|apk( -U)?( --no-cache)?( --repository [^ ]*)? add|apt-get install -y|g' -i /tmp/docker-run
. /tmp/docker-run
# ---
# Delve debugger for interactive development.
go get -u github.com/go-delve/delve/cmd/dlv
# ---

101
scripts/provision/vagrant Executable file
View File

@ -0,0 +1,101 @@
#!/bin/bash
# Common vagrant provisioning: guards, GOPATH discovery, profile setup,
# and extraction of the Dockerfile RUN commands, before dispatching to a
# box-specific provision script.
set -ve
PROVISION="scripts/provision/$BOX/vagrant"
# Refuse to run outside a vagrant box build.
if [ ! -f /etc/vagrant_box_build_time ]; then
echo "This script should only be called during vagrant provisioning"
exit 1
fi
# /go is recreated below; provisioning from inside it would destroy the tree.
if [[ $HOME == /go/* ]]; then
echo "Must not launch vagrant from /go/"
exit 1
fi
cd
# --- Default to root user for vagrant ssh
cat <<\EOF >/etc/profile.d/root.sh
[ $EUID -ne 0 ] && exec sudo -i
EOF
# --- Setup go version
# Fall back to four directories above $HOME (the conventional
# $GOPATH/src/github.com/rancher/k3s layout — confirm for your checkout).
if [ -z "${GOPATH}" ]; then
GOPATH=$(realpath $HOME/../../../..)
echo "WARNING: Assuming GOPATH=$GOPATH"
else
echo "Using parent GOPATH=$GOPATH"
fi
# --- Setup basic env
cat <<EOF >/etc/profile.d/env.sh
export ARCH=amd64
export GO111MODULE=off
export GOPATH="$GOPATH"
export PATH=/usr/local/bin:$PATH:/usr/local/go/bin:$GOPATH/bin
export HOME="$HOME" && cd
EOF
. /etc/profile.d/env.sh
# --- Clean go cache
rm -rf .cache/go-build || true
# --- Set color prompt
sed -i 's|:/bin/ash$|:/bin/bash|g' /etc/passwd
cat <<\EOF >/etc/profile.d/color.sh
alias ls='ls --color=auto'
export PS1='\033[31m[ \033[90m\D{%F 🐮 %T}\033[31m ]\n\[\033[36m\]\u\[\033[m\]@\[\033[32m\]\h\[\033[35m\]:\[\033[33;1m\]\w\[\033[m\]\$ '
EOF
# --- Setup install script from docker run commands
mkdir -p ${GOPATH}/bin
mkdir -p /go
ln -sf $GOPATH/bin /go/bin
# Join backslash-continued lines, then collect the RUN commands from
# Dockerfile.dapper for the box-specific script to translate and execute.
sed ':a;N;$!ba;s/\\\n/ /g' <Dockerfile.dapper | grep '^RUN ' | sed -e 's/^RUN //' >/tmp/docker-run
export BINDIR=/go/bin
export GOPATH=/go
export HOME=/tmp
# --- Add k3s motd
cat <<\EOF >/etc/motd
, ,
,-----------|'------'| |\ ____
/. '-'@ o|-' | | /___ \
|/| | .. | | | __ __) | ____
| .________.'----' | |/ /|__ < / __/
| || | || | < ___) |\__ \
\__|' \__|' |_|\_\_____/____/
EOF
# --- Utility function to download go
# Parse the golang base-image tag out of Dockerfile.dapper and install that
# go toolchain under /usr/local. Relies on $ARCH (set in env.sh above).
# Returns 1 when no version can be determined.
download_go() {
    # local: the original leaked goversion into the caller's scope.
    local goversion
    goversion=$(grep "^FROM " Dockerfile.dapper | sed -e 's/^FROM golang:\(.*\)-.*/\1/')
    if [ -z "$goversion" ]; then
        echo "Cannot find version of go to fetch"
        return 1
    fi
    echo "Installing go $goversion"
    # Quote the URL so an unexpected character in the version can't word-split it.
    curl -sL "https://storage.googleapis.com/golang/go${goversion}.linux-${ARCH}.tar.gz" | tar -xzf - -C /usr/local
}
# --- Utility function to download dqlite
# Derive the dqlite release URL from the dqlite-build stage of
# Dockerfile.dapper and save the tarball to /usr/src/dqlite.tgz.
# Returns 1 when the repo/tag cannot be parsed.
download_dqlite() {
    local repo
    repo=$(grep dqlite-build Dockerfile.dapper | sed -e 's/^.*--from=\([^ ]*\).*$/\1/' -e 's|:|/releases/download/|')
    # Check the parsed fragment, not the assembled URL: the original tested
    # the full URL string, which is never empty, so the guard never fired.
    if [ -z "$repo" ]; then
        echo "Cannot find dqlite URL to fetch"
        return 1
    fi
    local dqliteURL="https://github.com/${repo}/dqlite-$ARCH.tgz"
    mkdir -p /usr/src/
    echo "Downloading DQLITE from $dqliteURL"
    curl -sL "$dqliteURL" -o /usr/src/dqlite.tgz
}
# --- Run vagrant provision script if available
if [ ! -f "${PROVISION}" ]; then
    # Double quotes so ${PROVISION} actually expands in the warning; the
    # original single-quoted string printed the literal text "${PROVISION}".
    echo "WARNING: Unable to execute provision script \"${PROVISION}\""
    exit
fi
echo "running '${PROVISION}'..." && \
. ${PROVISION} && \
echo "finished '${PROVISION}'!"

View File

@ -13,19 +13,18 @@ mkdir -p $artifacts
# ---
[ "$ARCH" = 'arm' ] && \
early-exit "Skipping sonobuoy, images not available for $ARCH."
E2E_OUTPUT=$artifacts test-run-sonobuoy
# ---
if [ "$DRONE_BUILD_EVENT" = 'tag' ]; then
printf "\033[33mSkipping remaining tests on tag.\033[m\n"
exit 0
fi
[ "$DRONE_BUILD_EVENT" = 'tag' ] && \
early-exit 'Skipping remaining tests on tag.'
if [ "$ARCH" != 'amd64' ]; then
printf "\033[33mSkipping remaining tests, images not available for $ARCH.\033[m\n"
exit 0
fi
[ "$ARCH" != 'amd64' ] && \
early-exit "Skipping remaining tests, images not available for $ARCH."
# ---

View File

@ -437,6 +437,14 @@ export -f provision-cluster
# ---
# early-exit: print a yellow notice ($1) and exit with status $2.
# When $2 is unset, exit uses the last command's status — 0 after the
# successful printf — so callers may omit it for a clean exit.
early-exit() {
printf "\033[33m$1\033[m\n"
exit $2
}
export -f early-exit
# ---
run-test() {
export PROVISION_LOCK=$(mktemp)
./scripts/test-runner $@ &
@ -454,27 +462,27 @@ export -f run-test
# ---
e2e-test() {
local label=$label
if [ -n "$LABEL_SUFFIX" ]; then
label="$label-$LABEL_SUFFIX"
fi
local logOutput=
if [ -n "$E2E_OUTPUT" ]; then
logOutput=$E2E_OUTPUT/$logName
fi
LABEL=$label LOG_OUTPUT=$logOutput run-test $@
local label=$label
if [ -n "$LABEL_SUFFIX" ]; then
label="$label-$LABEL_SUFFIX"
fi
local logOutput=
if [ -n "$E2E_OUTPUT" ]; then
logOutput=$E2E_OUTPUT/$logName
fi
LABEL=$label LOG_OUTPUT=$logOutput run-test $@
}
# ---
run-e2e-tests() {
label=PARALLEL \
logName=e2e-STATUS-${ARCH}-parallel.log \
e2e-test ${sonobuoyParallelArgs[@]}
label=PARALLEL \
logName=e2e-STATUS-${ARCH}-parallel.log \
e2e-test ${sonobuoyParallelArgs[@]}
label=SERIAL \
logName=e2e-STATUS-${ARCH}-serial.log \
e2e-test ${sonobuoySerialArgs[@]}
label=SERIAL \
logName=e2e-STATUS-${ARCH}-serial.log \
e2e-test ${sonobuoySerialArgs[@]}
}
export -f run-e2e-tests

View File

@ -1,14 +1,38 @@
#!/bin/bash
# Basic cluster smoke test: 1 server + 1 agent with the packaged add-ons
# deployed, then version and airgap-image-list verification.
all_services=(
coredns
local-path-provisioner
metrics-server
traefik
)
export NUM_SERVERS=1
export NUM_AGENTS=1
export SERVER_ARGS='--no-deploy=traefik,coredns,local-storage,metrics-server'
export WAIT_SERVICES="${all_services[@]}"
start-test() {
# check-config is advisory only; ignore its exit status.
docker exec $(cat $TEST_DIR/servers/1/metadata/name) check-config || true
verify-valid-versions $(cat $TEST_DIR/servers/1/metadata/name)
verify-airgap-images $(cat $TEST_DIR/{servers,agents}/*/metadata/name)
}
export -f start-test
# -- check for changes to the airgap image list
# Collects the images actually pulled by the given containers and diffs the
# sorted list against the committed scripts/airgap/image-list.txt.
verify-airgap-images() {
local airgap_image_list='scripts/airgap/image-list.txt'
for name in $@; do
docker exec $name crictl images -o json \
| jq -r '.images[].repoTags[0] | select(. != null)'
done | sort -u >$airgap_image_list.tmp
if ! diff $airgap_image_list{,.tmp}; then
echo '[ERROR] Failed airgap image check'
return 1
fi
}
export -f verify-airgap-images
# --- create a basic cluster and check for valid versions
LABEL=BASICS run-test

View File

@ -1,85 +0,0 @@
#!/bin/bash
# Legacy single-box (alpine) vagrant provisioning: installs go, sets up
# GOPATH/profile, extracts the Dockerfile RUN commands, and points docker
# at a socat-forwarded host docker daemon.
set -ve
cd $(dirname $0)/..
# Refuse to run outside a vagrant box build.
if [ ! -f /etc/vagrant_box_build_time ]; then
echo "This script should only be called during vagrant provisioning"
exit 1
fi
ARCH=amd64
# /go is recreated below; provisioning from inside it would destroy the tree.
if [[ $HOME == /go/* ]]; then
echo "Must not launch vagrant from /go/"
exit 1
fi
if [ -z "${GOPATH}" ]; then
GOPATH=$(realpath $HOME/../../../..)
echo "WARNING: Assuming GOPATH=$GOPATH"
else
echo "Using parent GOPATH=$GOPATH"
fi
# Install the go version named by the golang base image in Dockerfile.dapper.
goversion=$(grep "^FROM " Dockerfile.dapper | sed -e 's/^FROM golang:\(.*\)-.*/\1/')
if [ -z "$goversion" ]; then
echo "Cannot find version of go to fetch"
exit 1
fi
echo "Installing go $goversion"
apk add -q -f curl libc6-compat tzdata
curl -sL https://storage.googleapis.com/golang/go${goversion}.linux-${ARCH}.tar.gz | tar -xzf - -C /usr/local
# ---
cat <<EOF >/etc/profile.d/build.sh
export STATIC_BUILD=true
EOF
# ---
cat <<EOF >/etc/profile.d/go.sh
export GOPATH=$GOPATH
export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
EOF
. /etc/profile.d/go.sh
# ---
sed -i 's|:/bin/ash$|:/bin/bash|g' /etc/passwd
cat <<\EOF >/etc/profile.d/color.sh
alias ls='ls --color=auto'
export PS1='\033[31m[ \033[90m\D{%F %T}\033[31m ]\n\[\033[36m\]\u\[\033[m\]🐮\[\033[32m\]\h:\[\033[33;1m\]\w\[\033[m\]$ '
EOF
# ---
mkdir -p ${GOPATH}/bin
mkdir -p /go
ln -s $GOPATH/bin /go/bin
# Join backslash-continued lines and collect the Dockerfile RUN commands.
sed ':a;N;$!ba;s/\\\n/ /g' <Dockerfile.dapper | grep '^RUN ' | sed -e 's/^RUN //' >/tmp/docker-run
export BINDIR=/go/bin
export GOPATH=/go
export HOME=/tmp && cd
. /tmp/docker-run
cd /go
go get github.com/rancher/trash
rm -rf /go
cd
# ---
# Docker daemon is expected on the host, forwarded to the guest (see warning below).
cat <<EOF >/etc/profile.d/docker.sh
export DOCKER_HOST=tcp://10.0.2.2:2375
EOF
. /etc/profile.d/docker.sh
# ---
cat <<\EOF >>/etc/motd
, ,
,-----------|'------'| |\ ____
/. '-'@ o|-' | | /___ \
|/| | .. | | | __ __) | ____
| .________.'----' | |/ /|__ < / __/
| || | || | < ___) |\__ \
\__|' \__|' |_|\_\_____/____/
EOF
# ---
set +v
if ! docker version --format '{{.Server.Version}}' >/tmp/docker-server-version; then
echo "WARNING: Unable to connect to the docker socket, to enable docker in vagrant run the following command on the host:"
echo "docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 127.0.0.1:2375:2375 alpine/socat TCP-LISTEN:2375,fork UNIX-CONNECT:/var/run/docker.sock"
else
echo "Using host docker server v$(cat /tmp/docker-server-version)"
fi

View File

@ -1,20 +1,25 @@
#!/bin/bash
TREE_STATE=clean
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
DIRTY="-dirty"
TREE_STATE=dirty
fi
COMMIT=$(git log -n3 --pretty=format:"%H %ae" | grep -v ' drone@localhost$' | cut -f1 -d\ | head -1)
if [ -z "${COMMIT}" ]; then
COMMIT=$(git rev-parse HEAD)
fi
GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)}
ARCH=$(go env GOARCH)
ARCH=${ARCH:-$(go env GOARCH)}
SUFFIX="-${ARCH}"
GIT_TAG=$DRONE_TAG
TREE_STATE=clean
COMMIT=$DRONE_COMMIT
if [ -d .git ]; then
if [ -z "$GIT_TAG" ]; then
GIT_TAG=$(git tag -l --contains HEAD | head -n 1)
fi
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
DIRTY="-dirty"
TREE_STATE=dirty
fi
COMMIT=$(git log -n3 --pretty=format:"%H %ae" | grep -v ' drone@localhost$' | cut -f1 -d\ | head -1)
if [ -z "${COMMIT}" ]; then
COMMIT=$(git rev-parse HEAD || true)
fi
fi
VERSION_CONTAINERD=$(grep github.com/containerd/containerd go.mod | head -n1 | awk '{print $4}')
if [ -z "$VERSION_CONTAINERD" ]; then

View File

@ -36,6 +36,7 @@ import (
prototypes "github.com/gogo/protobuf/types"
ver "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
@ -242,7 +243,17 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N
if err != nil {
return nil, err
}
spec, err := c.Spec(ctx)
if err != nil {
return nil, err
}
for _, m := range mounts {
if spec.Linux != nil && spec.Linux.MountLabel != "" {
context := label.FormatMountLabel("", spec.Linux.MountLabel)
if context != "" {
m.Options = append(m.Options, context)
}
}
request.Rootfs = append(request.Rootfs, &types.Mount{
Type: m.Type,
Source: m.Source,

View File

@ -39,6 +39,7 @@ import (
"github.com/davecgh/go-spew/spew"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"golang.org/x/net/context"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
@ -173,6 +174,18 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
return nil, errors.Wrapf(err, "failed to generate container %q spec", id)
}
meta.ProcessLabel = spec.Process.SelinuxLabel
if config.GetLinux().GetSecurityContext().GetPrivileged() {
// If privileged don't set the SELinux label but still record it on the container so
// the unused MCS label can be released later
spec.Process.SelinuxLabel = ""
}
defer func() {
if retErr != nil {
_ = label.ReleaseLabel(spec.Process.SelinuxLabel)
}
}()
log.G(ctx).Debugf("Container %q spec: %#+v", id, spew.NewFormatter(spec))
// Set snapshotter before any other options.
@ -324,7 +337,7 @@ func (c *criService) CreateContainer(ctx context.Context, r *runtime.CreateConta
func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxPid uint32, config *runtime.ContainerConfig,
sandboxConfig *runtime.PodSandboxConfig, imageConfig *imagespec.ImageConfig, extraMounts []*runtime.Mount,
ociRuntime config.Runtime) (*runtimespec.Spec, error) {
ociRuntime config.Runtime) (retSpec *runtimespec.Spec, retErr error) {
specOpts := []oci.SpecOpts{
customopts.WithoutRunMount,
@ -366,11 +379,27 @@ func (c *criService) generateContainerSpec(id string, sandboxID string, sandboxP
specOpts = append(specOpts, oci.WithEnv(env))
securityContext := config.GetLinux().GetSecurityContext()
selinuxOpt := securityContext.GetSelinuxOptions()
processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt)
labelOptions := toLabel(securityContext.GetSelinuxOptions())
if len(labelOptions) == 0 {
// Use pod level SELinux config
if sandbox, err := c.sandboxStore.Get(sandboxID); err == nil {
labelOptions, err = label.DupSecOpt(sandbox.ProcessLabel)
if err != nil {
return nil, err
}
}
}
processLabel, mountLabel, err := label.InitLabels(labelOptions)
if err != nil {
return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions())
}
defer func() {
if retErr != nil {
_ = label.ReleaseLabel(processLabel)
}
}()
specOpts = append(specOpts, customopts.WithMounts(c.os, config, extraMounts, mountLabel))
if !c.config.DisableProcMount {

View File

@ -298,47 +298,55 @@ func (c *criService) ensureImageExists(ctx context.Context, ref string, config *
return &newImage, nil
}
func initSelinuxOpts(selinuxOpt *runtime.SELinuxOption) (string, string, error) {
if selinuxOpt == nil {
return "", "", nil
func toLabel(selinuxOptions *runtime.SELinuxOption) (labels []string) {
if selinuxOptions == nil {
return nil
}
// Should ignored selinuxOpts if they are incomplete.
if selinuxOpt.GetUser() == "" ||
selinuxOpt.GetRole() == "" ||
selinuxOpt.GetType() == "" {
return "", "", nil
if selinuxOptions.User != "" {
labels = append(labels, "user:"+selinuxOptions.User)
}
if selinuxOptions.Role != "" {
labels = append(labels, "role:"+selinuxOptions.Role)
}
if selinuxOptions.Type != "" {
labels = append(labels, "type:"+selinuxOptions.Type)
}
if selinuxOptions.Level != "" {
labels = append(labels, "level:"+selinuxOptions.Level)
}
// make sure the format of "level" is correct.
ok, err := checkSelinuxLevel(selinuxOpt.GetLevel())
if err != nil || !ok {
return "", "", err
}
return
}
labelOpts := fmt.Sprintf("%s:%s:%s:%s",
selinuxOpt.GetUser(),
selinuxOpt.GetRole(),
selinuxOpt.GetType(),
selinuxOpt.GetLevel())
func initLabelsFromOpt(selinuxOpts *runtime.SELinuxOption) (string, string, error) {
return initLabels(toLabel(selinuxOpts))
}
options, err := label.DupSecOpt(labelOpts)
if err != nil {
return "", "", err
func initLabels(options []string) (string, string, error) {
for _, opt := range options {
if strings.HasPrefix(opt, "level:") {
if err := checkSelinuxLevel(strings.TrimPrefix(opt, "level:")); err != nil {
return "", "", err
}
}
}
return label.InitLabels(options)
}
func checkSelinuxLevel(level string) (bool, error) {
func checkSelinuxLevel(level string) error {
if len(level) == 0 {
return true, nil
return nil
}
matched, err := regexp.MatchString(`^s\d(-s\d)??(:c\d{1,4}((.c\d{1,4})?,c\d{1,4})*(.c\d{1,4})?(,c\d{1,4}(.c\d{1,4})?)*)?$`, level)
if err != nil || !matched {
return false, errors.Wrapf(err, "the format of 'level' %q is not correct", level)
if err != nil {
return errors.Wrapf(err, "the format of 'level' %q is not correct", level)
}
return true, nil
if !matched {
return fmt.Errorf("the format of 'level' %q is not correct", level)
}
return nil
}
// isInCRIMounts checks whether a destination is in CRI mount list.

View File

@ -34,6 +34,7 @@ import (
"github.com/davecgh/go-spew/spew"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
@ -158,6 +159,18 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
return nil, errors.Wrap(err, "failed to generate sandbox container spec")
}
log.G(ctx).Debugf("Sandbox container %q spec: %#+v", id, spew.NewFormatter(spec))
sandbox.ProcessLabel = spec.Process.SelinuxLabel
defer func() {
if retErr != nil {
_ = label.ReleaseLabel(sandbox.ProcessLabel)
}
}()
if securityContext.GetPrivileged() {
// If privileged don't set selinux label, but we still record the MCS label so that
// the unused label can be freed later.
spec.Process.SelinuxLabel = ""
}
var specOpts []oci.SpecOpts
userstr, err := generateUserString(
@ -328,7 +341,7 @@ func (c *criService) RunPodSandbox(ctx context.Context, r *runtime.RunPodSandbox
}
func (c *criService) generateSandboxContainerSpec(id string, config *runtime.PodSandboxConfig,
imageConfig *imagespec.ImageConfig, nsPath string, runtimePodAnnotations []string) (*runtimespec.Spec, error) {
imageConfig *imagespec.ImageConfig, nsPath string, runtimePodAnnotations []string) (retSpec *runtimespec.Spec, retErr error) {
// Creates a spec Generator with the default spec.
// TODO(random-liu): [P1] Compare the default settings with docker and containerd default.
specOpts := []oci.SpecOpts{
@ -403,11 +416,15 @@ func (c *criService) generateSandboxContainerSpec(id string, config *runtime.Pod
},
}))
selinuxOpt := securityContext.GetSelinuxOptions()
processLabel, mountLabel, err := initSelinuxOpts(selinuxOpt)
processLabel, mountLabel, err := initLabelsFromOpt(securityContext.GetSelinuxOptions())
if err != nil {
return nil, errors.Wrapf(err, "failed to init selinux options %+v", securityContext.GetSelinuxOptions())
}
defer func() {
if retErr != nil && processLabel != "" {
_ = label.ReleaseLabel(processLabel)
}
}()
supplementalGroups := securityContext.GetSupplementalGroups()
specOpts = append(specOpts,

View File

@ -25,6 +25,7 @@ import (
"github.com/containerd/containerd"
"github.com/containerd/containerd/plugin"
"github.com/containerd/cri/pkg/store/label"
cni "github.com/containerd/go-cni"
runcapparmor "github.com/opencontainers/runc/libcontainer/apparmor"
runcseccomp "github.com/opencontainers/runc/libcontainer/seccomp"
@ -104,14 +105,15 @@ type criService struct {
// NewCRIService returns a new instance of CRIService
func NewCRIService(config criconfig.Config, client *containerd.Client) (CRIService, error) {
var err error
labels := label.NewStore()
c := &criService{
config: config,
client: client,
apparmorEnabled: runcapparmor.IsEnabled() && !config.DisableApparmor,
seccompEnabled: runcseccomp.IsEnabled(),
os: osinterface.RealOS{},
sandboxStore: sandboxstore.NewStore(),
containerStore: containerstore.NewStore(),
sandboxStore: sandboxstore.NewStore(labels),
containerStore: containerstore.NewStore(labels),
imageStore: imagestore.NewStore(client),
snapshotStore: snapshotstore.NewStore(),
sandboxNameIndex: registrar.NewRegistrar(),

View File

@ -20,6 +20,7 @@ import (
"sync"
"github.com/containerd/containerd"
"github.com/containerd/cri/pkg/store/label"
"github.com/docker/docker/pkg/truncindex"
runtime "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
@ -101,13 +102,15 @@ type Store struct {
lock sync.RWMutex
containers map[string]Container
idIndex *truncindex.TruncIndex
labels *label.Store
}
// NewStore creates a container store.
func NewStore() *Store {
func NewStore(labels *label.Store) *Store {
return &Store{
containers: make(map[string]Container),
idIndex: truncindex.NewTruncIndex([]string{}),
labels: labels,
}
}
@ -119,6 +122,9 @@ func (s *Store) Add(c Container) error {
if _, ok := s.containers[c.ID]; ok {
return store.ErrAlreadyExist
}
if err := s.labels.Reserve(c.ProcessLabel); err != nil {
return err
}
if err := s.idIndex.Add(c.ID); err != nil {
return err
}
@ -165,6 +171,7 @@ func (s *Store) Delete(id string) {
// So we need to return if there is an error.
return
}
s.labels.Release(s.containers[id].ProcessLabel)
s.idIndex.Delete(id) // nolint: errcheck
delete(s.containers, id)
}

View File

@ -61,6 +61,8 @@ type Metadata struct {
// StopSignal is the system call signal that will be sent to the container to exit.
// TODO(random-liu): Add integration test for stop signal.
StopSignal string
// ProcessLabel is the SELinux process label for the container
ProcessLabel string
}
// MarshalJSON encodes Metadata into bytes in json format.

View File

@ -0,0 +1,90 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package label
import (
"sync"
"github.com/opencontainers/selinux/go-selinux"
)
// Store reference-counts SELinux MCS "level" values so a level is reserved
// once while any container or sandbox uses it and released only when the
// last user is gone.
type Store struct {
sync.Mutex
// levels maps an SELinux level to the number of labels currently using it.
levels map[string]int
// Releaser and Reserver perform the actual (un)reservation; NewStore wires
// them to selinux.ReleaseLabel/ReserveLabel. Exposed as fields, presumably
// so tests can substitute them — confirm with callers.
Releaser func(string)
Reserver func(string)
}
// NewStore returns an empty Store backed by the selinux package's
// ReserveLabel and ReleaseLabel functions.
func NewStore() *Store {
return &Store{
levels: map[string]int{},
Releaser: selinux.ReleaseLabel,
Reserver: selinux.ReserveLabel,
}
}
// Reserve records one use of the level embedded in label. The underlying
// Reserver is invoked only the first time a level is seen; subsequent calls
// just increment the reference count. Labels without a level are ignored.
// Returns an error only when label cannot be parsed as an SELinux context.
func (s *Store) Reserve(label string) error {
s.Lock()
defer s.Unlock()
context, err := selinux.NewContext(label)
if err != nil {
return err
}
level := context["level"]
// no reason to count empty
if level == "" {
return nil
}
// First user of this level: perform the real reservation.
if _, ok := s.levels[level]; !ok {
s.Reserver(label)
}
s.levels[level]++
return nil
}
// Release drops one use of the level embedded in label, invoking Releaser
// once the count reaches zero. Unparseable labels, labels without a level,
// and levels never reserved are silently ignored.
func (s *Store) Release(label string) {
s.Lock()
defer s.Unlock()
context, err := selinux.NewContext(label)
if err != nil {
return
}
level := context["level"]
if level == "" {
return
}
count, ok := s.levels[level]
if !ok {
return
}
switch {
case count == 1:
// Last user: release the label and forget the level.
s.Releaser(label)
delete(s.levels, level)
case count < 1:
// Defensive: a non-positive count should not occur; drop the bogus entry.
delete(s.levels, level)
case count > 1:
s.levels[level] = count - 1
}
}

View File

@ -61,6 +61,8 @@ type Metadata struct {
RuntimeHandler string
// CNIresult resulting configuration for attached network namespace interfaces
CNIResult *cni.CNIResult
// ProcessLabel is the SELinux process label for the container
ProcessLabel string
}
// MarshalJSON encodes Metadata into bytes in json format.

View File

@ -20,6 +20,7 @@ import (
"sync"
"github.com/containerd/containerd"
"github.com/containerd/cri/pkg/store/label"
"github.com/docker/docker/pkg/truncindex"
"github.com/containerd/cri/pkg/netns"
@ -62,13 +63,15 @@ type Store struct {
lock sync.RWMutex
sandboxes map[string]Sandbox
idIndex *truncindex.TruncIndex
labels *label.Store
}
// NewStore creates a sandbox store.
func NewStore() *Store {
func NewStore(labels *label.Store) *Store {
return &Store{
sandboxes: make(map[string]Sandbox),
idIndex: truncindex.NewTruncIndex([]string{}),
labels: labels,
}
}
@ -79,6 +82,9 @@ func (s *Store) Add(sb Sandbox) error {
if _, ok := s.sandboxes[sb.ID]; ok {
return store.ErrAlreadyExist
}
if err := s.labels.Reserve(sb.ProcessLabel); err != nil {
return err
}
if err := s.idIndex.Add(sb.ID); err != nil {
return err
}
@ -125,6 +131,7 @@ func (s *Store) Delete(id string) {
// So we need to return if there is an error.
return
}
s.labels.Release(s.sandboxes[id].ProcessLabel)
s.idIndex.Delete(id) // nolint: errcheck
delete(s.sandboxes, id)
}

5
vendor/modules.txt vendored
View File

@ -151,7 +151,7 @@ github.com/container-storage-interface/spec/lib/go/csi
github.com/containerd/cgroups
# github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1 => github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50
github.com/containerd/console
# github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 => github.com/rancher/containerd v1.3.3-k3s1
# github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69 => github.com/rancher/containerd v1.3.3-k3s2
github.com/containerd/containerd
github.com/containerd/containerd/api/events
github.com/containerd/containerd/api/services/containers/v1
@ -288,7 +288,7 @@ github.com/containerd/continuity/pathdriver
github.com/containerd/continuity/proto
github.com/containerd/continuity/syscallx
github.com/containerd/continuity/sysx
# github.com/containerd/cri v0.0.0-00010101000000-000000000000 => github.com/rancher/cri v1.3.0-k3s.3
# github.com/containerd/cri v0.0.0-00010101000000-000000000000 => github.com/rancher/cri v1.3.0-k3s.4
github.com/containerd/cri
github.com/containerd/cri/pkg/annotations
github.com/containerd/cri/pkg/api/runtimeoptions/v1
@ -306,6 +306,7 @@ github.com/containerd/cri/pkg/server/io
github.com/containerd/cri/pkg/store
github.com/containerd/cri/pkg/store/container
github.com/containerd/cri/pkg/store/image
github.com/containerd/cri/pkg/store/label
github.com/containerd/cri/pkg/store/sandbox
github.com/containerd/cri/pkg/store/snapshot
github.com/containerd/cri/pkg/util