Update flannel to v0.24.0 and remove multiclustercidr flag (#9075)

* update flannel to v0.24.0

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

* remove multiclustercidr flag

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>

---------

Signed-off-by: galal-hussein <hussein.galal.ahmed.11@gmail.com>
Hussein Galal authored 2023-12-20 00:25:38 +02:00, committed by GitHub
parent 7101af36bb
commit 9411196406
11 changed files with 13 additions and 495 deletions
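For clusters that had opted in to the experimental feature, removing the flag also means dropping the option from the server configuration before upgrading. A minimal, hypothetical config.yaml sketch (illustrative values only, not part of this diff):

flannel-backend: vxlan
# multi-cluster-cidr: true    # no longer a recognized server flag after this change; remove this key before upgrading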

go.mod (19 lines changed)

@@ -12,7 +12,6 @@ replace (
 github.com/docker/docker => github.com/docker/docker v24.0.0-rc.2.0.20230801142700-69c9adb7d386+incompatible
 github.com/docker/libnetwork => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34
 github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.9.0
-github.com/flannel-io/flannel => github.com/k3s-io/flannel v0.23.0-k3s1
 github.com/golang/protobuf => github.com/golang/protobuf v1.5.3
 github.com/googleapis/gax-go/v2 => github.com/googleapis/gax-go/v2 v2.12.0
 github.com/juju/errors => github.com/k3s-io/nocode v0.0.0-20200630202308-cb097102c09f
@@ -101,7 +100,7 @@ require (
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
 github.com/docker/docker v24.0.7+incompatible
 github.com/erikdubbelboer/gspt v0.0.0-20190125194910-e68493906b83
-github.com/flannel-io/flannel v0.23.0
+github.com/flannel-io/flannel v0.24.0
 github.com/go-bindata/go-bindata v3.1.2+incompatible
 github.com/go-sql-driver/mysql v1.7.1
 github.com/go-test/deep v1.0.7
@@ -144,7 +143,7 @@ require (
 go.etcd.io/etcd/etcdutl/v3 v3.5.9
 go.etcd.io/etcd/server/v3 v3.5.10
 go.uber.org/zap v1.24.0
-golang.org/x/crypto v0.15.0
+golang.org/x/crypto v0.17.0
 golang.org/x/net v0.19.0
 golang.org/x/sync v0.4.0
 golang.org/x/sys v0.15.0
@@ -374,15 +373,15 @@ require (
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
 go.opencensus.io v0.24.0 // indirect
 go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.42.0 // indirect
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.45.0 // indirect
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect
-go.opentelemetry.io/otel v1.19.0 // indirect
+go.opentelemetry.io/otel v1.21.0 // indirect
 go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
-go.opentelemetry.io/otel/metric v1.19.0 // indirect
-go.opentelemetry.io/otel/sdk v1.19.0 // indirect
-go.opentelemetry.io/otel/trace v1.19.0 // indirect
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect
+go.opentelemetry.io/otel/metric v1.21.0 // indirect
+go.opentelemetry.io/otel/sdk v1.21.0 // indirect
+go.opentelemetry.io/otel/trace v1.21.0 // indirect
 go.opentelemetry.io/proto/otlp v1.0.0 // indirect
 go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
 go.uber.org/atomic v1.10.0 // indirect

go.sum (4 lines changed)

@@ -553,6 +553,8 @@ github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBD
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
 github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flannel-io/flannel v0.24.0 h1:dsVWmcWTn9ujbNfoTR0+Epkqfq1/bGWOCP5vVCTEBLI=
+github.com/flannel-io/flannel v0.24.0/go.mod h1:GvvhQS/xd5QM6oc9yeVz8KBbp5hWJZgPtwTKgpuLUPI=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -891,8 +893,6 @@ github.com/k3s-io/etcd/raft/v3 v3.5.9-k3s1 h1:nlix2+EM1UDofoHgp/X2VHzMvJW7oYbZbE
 github.com/k3s-io/etcd/raft/v3 v3.5.9-k3s1/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg=
 github.com/k3s-io/etcd/server/v3 v3.5.9-k3s1 h1:B3039IkTPnwQEt4tIMjC6yd6b1Q3Z9ZZe8rfaBPfbXo=
 github.com/k3s-io/etcd/server/v3 v3.5.9-k3s1/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g=
-github.com/k3s-io/flannel v0.23.0-k3s1 h1:JCROHniLnHNcPuQi2IHfDpyDMGG2yuC1znLVJvdnirc=
-github.com/k3s-io/flannel v0.23.0-k3s1/go.mod h1:QSf9ZPo+azj/3cpDqMwDjmLk7+SsM7h3wPT8iExsgis=
 github.com/k3s-io/helm-controller v0.15.4 h1:l4DWmUWpphbtwmuXGtpr5Rql/2NaCLSv4ZD8HlND9uY=
 github.com/k3s-io/helm-controller v0.15.4/go.mod h1:BgCPBQblj/Ect4Q7/Umf86WvyDjdG/34D+n8wfXtoeM=
 github.com/k3s-io/kine v0.11.0 h1:7tS0H9yBDxXiy1BgEEkBWLswwG/q4sARPTHdxOMz1qw=

(another changed file)

@@ -501,7 +501,6 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
 SELinux: envInfo.EnableSELinux,
 ContainerRuntimeEndpoint: envInfo.ContainerRuntimeEndpoint,
 ImageServiceEndpoint: envInfo.ImageServiceEndpoint,
-MultiClusterCIDR: controlConfig.MultiClusterCIDR,
 FlannelBackend: controlConfig.FlannelBackend,
 FlannelIPv6Masq: controlConfig.FlannelIPv6Masq,
 FlannelExternalIP: controlConfig.FlannelExternalIP,

(another changed file)

@@ -47,7 +47,7 @@ var (
 FlannelExternalIPv6Annotation = FlannelBaseAnnotation + "/public-ipv6-overwrite"
 )

-func flannel(ctx context.Context, flannelIface *net.Interface, flannelConf, kubeConfigFile string, flannelIPv6Masq bool, multiClusterCIDR bool, netMode int) error {
+func flannel(ctx context.Context, flannelIface *net.Interface, flannelConf, kubeConfigFile string, flannelIPv6Masq bool, netMode int) error {
 extIface, err := LookupExtInterface(flannelIface, netMode)
 if err != nil {
 return errors.Wrap(err, "failed to find the interface")

(another changed file)

@@ -74,7 +74,7 @@ func Run(ctx context.Context, nodeConfig *config.Node, nodes typedcorev1.NodeInt
 return errors.Wrap(err, "failed to check netMode for flannel")
 }
 go func() {
-err := flannel(ctx, nodeConfig.FlannelIface, nodeConfig.FlannelConfFile, nodeConfig.AgentConfig.KubeConfigKubelet, nodeConfig.FlannelIPv6Masq, nodeConfig.MultiClusterCIDR, netMode)
+err := flannel(ctx, nodeConfig.FlannelIface, nodeConfig.FlannelConfFile, nodeConfig.AgentConfig.KubeConfigKubelet, nodeConfig.FlannelIPv6Masq, netMode)
 if err != nil && !errors.Is(err, context.Canceled) {
 logrus.Fatalf("flannel exited: %v", err)
 }

(another changed file)

@@ -64,7 +64,6 @@ type Server struct {
 AdvertisePort int
 DisableScheduler bool
 ServerURL string
-MultiClusterCIDR bool
 FlannelBackend string
 FlannelIPv6Masq bool
 FlannelExternalIP bool
@@ -220,11 +219,6 @@ var ServerFlags = []cli.Flag{
 Destination: &ServerConfig.FlannelBackend,
 Value: "vxlan",
 },
-&cli.BoolFlag{
-Name: "multi-cluster-cidr",
-Usage: "(experimental/networking) Enable multiClusterCIDR",
-Destination: &ServerConfig.MultiClusterCIDR,
-},
 &cli.BoolFlag{
 Name: "flannel-ipv6-masq",
 Usage: "(networking) Enable IPv6 masquerading for pod",

(another changed file)

@@ -150,7 +150,6 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 serverConfig.ControlConfig.Datastore.BackendTLSConfig.KeyFile = cfg.DatastoreKeyFile
 serverConfig.ControlConfig.AdvertiseIP = cfg.AdvertiseIP
 serverConfig.ControlConfig.AdvertisePort = cfg.AdvertisePort
-serverConfig.ControlConfig.MultiClusterCIDR = cfg.MultiClusterCIDR
 serverConfig.ControlConfig.FlannelBackend = cfg.FlannelBackend
 serverConfig.ControlConfig.FlannelIPv6Masq = cfg.FlannelIPv6Masq
 serverConfig.ControlConfig.FlannelExternalIP = cfg.FlannelExternalIP
@@ -192,9 +191,6 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
 logrus.Info("ETCD snapshots are disabled")
 }
-if cfg.MultiClusterCIDR {
-logrus.Warn("multiClusterCIDR alpha feature will be removed in future releases")
-}

 if cfg.ClusterResetRestorePath != "" && !cfg.ClusterReset {
 return errors.New("invalid flag use; --cluster-reset required with --cluster-reset-restore-path")
 }

(another changed file)

@@ -40,7 +40,6 @@ type Node struct {
 ImageServiceEndpoint string
 NoFlannel bool
 SELinux bool
-MultiClusterCIDR bool
 FlannelBackend string
 FlannelConfFile string
 FlannelConfOverride bool
@@ -149,7 +148,6 @@ type CriticalControlArgs struct {
 DisableNPC bool `cli:"disable-network-policy"`
 DisableServiceLB bool `cli:"disable-service-lb"`
 EncryptSecrets bool `cli:"secrets-encryption"`
-MultiClusterCIDR bool `cli:"multi-cluster-cidr"`
 FlannelBackend string `cli:"flannel-backend"`
 FlannelIPv6Masq bool `cli:"flannel-ipv6-masq"`
 FlannelExternalIP bool `cli:"flannel-external-ip"`

(another changed file)

@@ -113,10 +113,6 @@ func controllerManager(ctx context.Context, cfg *config.Control) error {
 "cluster-signing-legacy-unknown-cert-file": runtime.SigningServerCA,
 "cluster-signing-legacy-unknown-key-file": runtime.ServerCAKey,
 }
-if cfg.MultiClusterCIDR {
-argsMap["cidr-allocator-type"] = "MultiCIDRRangeAllocator"
-argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "MultiCIDRRangeAllocator=true")
-}
 if cfg.NoLeaderElect {
 argsMap["leader-elect"] = "false"
 }
@@ -219,10 +215,6 @@ func apiServer(ctx context.Context, cfg *config.Control) error {
 argsMap["enable-admission-plugins"] = "NodeRestriction"
 argsMap["anonymous-auth"] = "false"
 argsMap["profiling"] = "false"
-if cfg.MultiClusterCIDR {
-argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "MultiCIDRRangeAllocator=true")
-argsMap["runtime-config"] = "networking.k8s.io/v1alpha1"
-}
 if cfg.EncryptSecrets {
 argsMap["encryption-provider-config"] = runtime.EncryptionConfig
 argsMap["encryption-provider-config-automatic-reload"] = "true"
@@ -341,10 +333,6 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control) error {
 argsMap["controllers"] = argsMap["controllers"] + ",-cloud-node,-cloud-node-lifecycle"
 argsMap["secure-port"] = "0"
 }
-if cfg.MultiClusterCIDR {
-argsMap["cidr-allocator-type"] = "MultiCIDRRangeAllocator"
-argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "MultiCIDRRangeAllocator=true")
-}
 if cfg.DisableServiceLB {
 argsMap["controllers"] = argsMap["controllers"] + ",-service"
 }

(another changed file, deleted by this commit)

@@ -1,154 +0,0 @@
ENV['VAGRANT_NO_PARALLEL'] = 'no'
NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
["server-0", "server-1", "server-2", "agent-0"])
NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
GOCOVER = (ENV['E2E_GOCOVER'] || "")
NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
IP_FAMILY = (ENV['E2E_IP_FAMILY'] || "ipv4")
NETWORK4_PREFIX = "10.10.10"
NETWORK6_PREFIX = "fd11:decf:c0ff:ee"
install_type = ""
def provision(vm, role, role_num, node_num)
vm.box = NODE_BOXES[node_num]
vm.hostname = role
node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
node_ip6_gw = "#{NETWORK6_PREFIX}::1"
# Only works with libvirt, which allows IPv4 + IPv6 on a single network/interface
vm.network "private_network",
:ip => node_ip4,
:netmask => "255.255.255.0",
:libvirt__guest_ipv6 => "yes",
:libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
:libvirt__ipv6_prefix => "64"
scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? "./vagrantdefaults.rb" : "../vagrantdefaults.rb"
load vagrant_defaults
defaultOSConfigure(vm)
addCoverageDir(vm, role, GOCOVER)
vm.provision "IPv6 Setup", type: "shell", path: scripts_location +"/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, vm.box.to_s]
install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH)
vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io"
if role.include?("server") && role_num == 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
if IP_FAMILY.include?("ipv4")
k3s.config = <<~YAML
node-external-ip: #{node_ip4}
node-ip: #{node_ip4}
cluster-init: true
token: vagrant
cluster-cidr: 10.42.0.0/16
service-cidr: 10.43.0.0/16
bind-address: #{NETWORK4_PREFIX}.100
multi-cluster-cidr: true
flannel-iface: eth1
YAML
else
k3s.config = <<~YAML
node-external-ip: #{node_ip6}
node-ip: #{node_ip6}
cluster-init: true
token: vagrant
cluster-cidr: 2001:cafe:42::/56
service-cidr: 2001:cafe:43::/112
bind-address: #{NETWORK6_PREFIX}::10
multi-cluster-cidr: true
flannel-ipv6-masq: true
flannel-iface: eth1
YAML
end
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
elsif role.include?("server") && role_num != 0
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "server "
if IP_FAMILY.include?("ipv4")
k3s.config = <<~YAML
node-external-ip: #{node_ip4}
node-ip: #{node_ip4}
server: https://#{NETWORK4_PREFIX}.100:6443
token: vagrant
cluster-cidr: 10.42.0.0/16
service-cidr: 10.43.0.0/16
multi-cluster-cidr: true
flannel-iface: eth1
YAML
else
k3s.config = <<~YAML
node-external-ip: #{node_ip6}
node-ip: #{node_ip6}
server: https://[#{NETWORK6_PREFIX}::10]:6443
token: vagrant
cluster-cidr: 2001:cafe:42::/56
service-cidr: 2001:cafe:43::/112
multi-cluster-cidr: true
flannel-ipv6-masq: true
flannel-iface: eth1
YAML
end
k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type]
end
end
if role.include?("agent")
vm.provision :k3s, run: 'once' do |k3s|
k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321
k3s.args = "agent "
if IP_FAMILY.include?("ipv4")
k3s.config = <<~YAML
node-external-ip: #{node_ip4}
node-ip: #{node_ip4}
server: https://#{NETWORK4_PREFIX}.100:6443
token: vagrant
flannel-iface: eth1
YAML
else
k3s.config = <<~YAML
node-external-ip: #{node_ip6}
node-ip: #{node_ip6}
server: https://[#{NETWORK6_PREFIX}::10]:6443
token: vagrant
flannel-iface: eth1
YAML
end
k3s.env = ["K3S_KUBECONFIG_MODE=0644", "INSTALL_K3S_SKIP_START=true", install_type]
end
end
end
Vagrant.configure("2") do |config|
config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-libvirt"]
config.vm.provider "libvirt" do |v|
v.cpus = NODE_CPUS
v.memory = NODE_MEMORY
end
if NODE_ROLES.kind_of?(String)
NODE_ROLES = NODE_ROLES.split(" ", -1)
end
if NODE_BOXES.kind_of?(String)
NODE_BOXES = NODE_BOXES.split(" ", -1)
end
# Must iterate on the index, vagrant does not understand iterating
# over the node roles themselves
NODE_ROLES.length.times do |i|
name = NODE_ROLES[i]
role_num = name.split("-", -1).pop.to_i
config.vm.define name do |node|
provision(node.vm, name, role_num, i)
end
end
end

(another changed file, deleted by this commit)

@@ -1,302 +0,0 @@
package multiclustercidr
import (
"flag"
"fmt"
"os"
"strings"
"testing"
"github.com/k3s-io/k3s/tests/e2e"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64
var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system")
var serverCount = flag.Int("serverCount", 3, "number of server nodes")
var agentCount = flag.Int("agentCount", 1, "number of agent nodes")
var hardened = flag.Bool("hardened", false, "true or false")
var ci = flag.Bool("ci", false, "running on CI")
var local = flag.Bool("local", false, "deploy a locally built K3s binary")
func Test_E2EMultiClusterCIDR(t *testing.T) {
flag.Parse()
RegisterFailHandler(Fail)
suiteConfig, reporterConfig := GinkgoConfiguration()
RunSpecs(t, "MultiClusterCIDR Test Suite", suiteConfig, reporterConfig)
}
var (
kubeConfigFile string
serverNodeNames []string
agentNodeNames []string
)
var _ = ReportAfterEach(e2e.GenReport)
var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {
When("Cluster with IPv4 only is created", func() {
It("Starts up IPv4 setup with no issues", func() {
var err error
os.Setenv("E2E_IP_FAMILY", "ipv4")
defer os.Unsetenv("E2E_IP_FAMILY")
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each node has IPv4", func() {
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.IPv4).Should(ContainSubstring("10.10.10"))
}
})
It("Verifies that each pod has IPv4", func() {
podIPs, err := e2e.GetPodIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name)
}
})
It("Add new CIDR", func() {
_, err := e2e.DeployWorkload("cluster-cidr.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("10.248.0.0"))
})
It("Restart agent-0", func() {
agents := []string{"agent-0"}
err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each pod of agent-0 has IPv4 from the new CIDR", func() {
pods, err := e2e.ParsePods(kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if pod.Node == "agent-0" {
Expect(pod.IP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name)
}
}
})
It("Destroy Cluster", func() {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
if os.Getenv("E2E_GOCOVER") != "" {
Expect(os.Rename("coverage.out", "coverage-ipv4.out")).To(Succeed())
}
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
})
})
When("Cluster with IPv6 only is created", func() {
It("Starts up IPv6 setup with no issues", func() {
var err error
os.Setenv("E2E_IP_FAMILY", "ipv6")
defer os.Unsetenv("E2E_IP_FAMILY")
if *local {
serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount)
} else {
serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount)
}
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
fmt.Println("CLUSTER CONFIG")
fmt.Println("OS:", *nodeOS)
fmt.Println("Server Nodes:", serverNodeNames)
fmt.Println("Agent Nodes:", agentNodeNames)
kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each node has IPv6", func() {
nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeIPs {
Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff"))
}
})
It("Verifies that each pod has IPv6", func() {
podIPs, err := e2e.GetPodIPs(kubeConfigFile)
Expect(err).NotTo(HaveOccurred())
for _, pod := range podIPs {
Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name)
}
})
It("Add new CIDR", func() {
_, err := e2e.DeployWorkload("cluster-cidr-ipv6.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred())
Eventually(func() (string, error) {
cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile
return e2e.RunCommand(cmd)
}, "120s", "5s").Should(ContainSubstring("2001:cafe:248"))
})
It("Delete and restart agent-0", func() {
agents := []string{"agent-0"}
err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})
It("Checks Node Status", func() {
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParseNodes(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Checks Pod Status", func() {
Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "420s", "5s").Should(Succeed())
_, err := e2e.ParsePods(kubeConfigFile, true)
Expect(err).NotTo(HaveOccurred())
})
It("Verifies that each pod of agent-0 has IPv6 from the new CIDR", func() {
pods, err := e2e.ParsePods(kubeConfigFile, false)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if pod.Node == "agent-0" {
Expect(pod.IP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name)
}
}
})
})
})
var failed bool
var _ = AfterEach(func() {
failed = failed || CurrentSpecReport().Failed()
})
var _ = AfterSuite(func() {
if failed && !*ci {
fmt.Println("FAILED!")
} else {
Expect(e2e.GetCoverageReport(append(serverNodeNames, agentNodeNames...))).To(Succeed())
if os.Getenv("E2E_GOCOVER") != "" {
Expect(os.Rename("coverage.out", "coverage-ipv6.out")).To(Succeed())
}
Expect(e2e.DestroyCluster()).To(Succeed())
Expect(os.Remove(kubeConfigFile)).To(Succeed())
}
})