Fix CloudDualStackNodeIPs feature-gate inconsistency

Enable the feature-gate for both the kubelet and the cloud-controller-manager. Enabling it on only one side breaks RKE2, where feature-gates are not shared between the two components because they run as separate processes.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
Brad Davidson 2023-10-17 05:43:20 +00:00 committed by Brad Davidson
parent 0c9bf36fe0
commit b8dc95539b
3 changed files with 19 additions and 8 deletions
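
For context, a minimal sketch of the idea behind the fix: the same gate has to be appended to the feature-gates argument of both components, because the kubelet and the cloud-controller-manager run as separate processes and do not inherit each other's gates. The addFeatureGate helper and the argument maps below are illustrative stand-ins, not the actual util.AddFeatureGate implementation used in the diff.

package main

import (
	"fmt"
	"strings"
)

// addFeatureGate appends a gate to a comma-separated feature-gates value.
// Illustrative stand-in for util.AddFeatureGate in the diff below.
func addFeatureGate(current, gate string) string {
	if current == "" {
		return gate
	}
	return strings.Join([]string{current, gate}, ",")
}

func main() {
	kubeletArgs := map[string]string{}
	ccmArgs := map[string]string{}

	// The gate must be set on both sides; setting it on only one breaks
	// setups (like RKE2) where the components do not share feature-gates.
	kubeletArgs["feature-gates"] = addFeatureGate(kubeletArgs["feature-gates"], "CloudDualStackNodeIPs=true")
	ccmArgs["feature-gates"] = addFeatureGate(ccmArgs["feature-gates"], "CloudDualStackNodeIPs=true")

	fmt.Println("kubelet --feature-gates=" + kubeletArgs["feature-gates"])
	fmt.Println("cloud-controller-manager --feature-gates=" + ccmArgs["feature-gates"])
}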


@@ -123,12 +123,21 @@ func kubeletArgs(cfg *config.Agent) map[string]string {
 	if cfg.NodeName != "" {
 		argsMap["hostname-override"] = cfg.NodeName
 	}
-	if nodeIPs := util.JoinIPs(cfg.NodeIPs); nodeIPs != "" {
+	// If the embedded CCM is disabled, don't assume that dual-stack node IPs are safe.
+	// When using an external CCM, if the user wants dual-stack node IPs, they will need to set the node-ip kubelet arg directly.
+	// This should be fine since most cloud providers have their own way of finding node IPs that doesn't depend on the kubelet
+	// setting them.
+	if cfg.DisableCCM {
 		dualStack, err := utilsnet.IsDualStackIPs(cfg.NodeIPs)
-		if err == nil && dualStack {
-			argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "CloudDualStackNodeIPs=true")
+		if err == nil && !dualStack {
+			argsMap["node-ip"] = cfg.NodeIP
 		}
+	} else {
+		// Cluster is using the embedded CCM, we know that the feature-gate will be enabled there as well.
+		argsMap["feature-gates"] = util.AddFeatureGate(argsMap["feature-gates"], "CloudDualStackNodeIPs=true")
+		if nodeIPs := util.JoinIPs(cfg.NodeIPs); nodeIPs != "" {
+			argsMap["node-ip"] = util.JoinIPs(cfg.NodeIPs)
+		}
-		argsMap["node-ip"] = nodeIPs
 	}
 	kubeletRoot, runtimeRoot, controllers := cgroups.CheckCgroups()
 	if !controllers["cpu"] {
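
As an aside, here is a self-contained sketch of how the branch above plays out for a dual-stack node with the embedded CCM enabled. The isDualStack helper, the sample addresses, and the use of the first IP as a stand-in for cfg.NodeIP are assumptions for illustration; the real code uses utilsnet.IsDualStackIPs from k8s.io/utils/net and util.JoinIPs.

package main

import (
	"fmt"
	"net"
	"strings"
)

// isDualStack is an illustrative stand-in for utilsnet.IsDualStackIPs:
// true if the list contains at least one IPv4 and one IPv6 address.
func isDualStack(ips []net.IP) bool {
	var v4, v6 bool
	for _, ip := range ips {
		if ip.To4() != nil {
			v4 = true
		} else {
			v6 = true
		}
	}
	return v4 && v6
}

func main() {
	// Sample dual-stack addresses; in k3s these come from cfg.NodeIPs.
	nodeIPs := []net.IP{net.ParseIP("10.0.0.10"), net.ParseIP("fd00::10")}
	disableCCM := false // embedded CCM enabled, as in the else branch above
	argsMap := map[string]string{}

	if disableCCM {
		// Without the embedded CCM, only a single node IP is passed unless
		// the user sets the node-ip kubelet arg themselves.
		if !isDualStack(nodeIPs) {
			argsMap["node-ip"] = nodeIPs[0].String() // stand-in for cfg.NodeIP
		}
	} else {
		// Embedded CCM: the gate is known to be enabled there as well,
		// so dual-stack node IPs are passed through.
		argsMap["feature-gates"] = "CloudDualStackNodeIPs=true"
		joined := make([]string, 0, len(nodeIPs))
		for _, ip := range nodeIPs {
			joined = append(joined, ip.String())
		}
		argsMap["node-ip"] = strings.Join(joined, ",")
	}

	fmt.Println(argsMap)
	// map[feature-gates:CloudDualStackNodeIPs=true node-ip:10.0.0.10,fd00::10]
}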


@@ -313,6 +313,7 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control) error {
 		"authentication-kubeconfig": runtime.KubeConfigCloudController,
 		"node-status-update-frequency": "1m0s",
 		"bind-address": cfg.Loopback(false),
+		"feature-gates": "CloudDualStackNodeIPs=true",
 	}
 	if cfg.NoLeaderElect {
 		argsMap["leader-elect"] = "false"


@@ -148,11 +148,12 @@ var _ = Describe("create a new cluster with kube-* flags", Ordered, func() {
 	It("should find cloud-controller-manager starting with"+
 		"\"--cloud-node,--cloud-node-lifecycle,--secure-port=0\" flags ", func() {
 		Eventually(func() error {
-			match, err := testutil.SearchK3sLog(server, "Running cloud-controller-manager --allocate-node-cidrs=true"+
-				" --authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig"+
-				" --authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 "+
+			match, err := testutil.SearchK3sLog(server, "Running cloud-controller-manager --allocate-node-cidrs=true "+
+				"--authentication-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig "+
+				"--authorization-kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig --bind-address=127.0.0.1 "+
 				"--cloud-config=/var/lib/rancher/k3s/server/etc/cloud-config.yaml --cloud-provider=k3s --cluster-cidr=10.42.0.0/16 "+
-				"--configure-cloud-routes=false --controllers=*,-route,-cloud-node,-cloud-node-lifecycle --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig "+
+				"--configure-cloud-routes=false --controllers=*,-route,-cloud-node,-cloud-node-lifecycle "+
+				"--feature-gates=CloudDualStackNodeIPs=true --kubeconfig=/var/lib/rancher/k3s/server/cred/cloud-controller.kubeconfig "+
 				"--leader-elect-resource-name=k3s-cloud-controller-manager --node-status-update-frequency=1m0s --profiling=false --secure-port=0")
 			if err != nil {
 				return err