Add support for dual-stack Pod/Service CIDRs and node IP addresses (#3212)

* Add support for dual-stack cluster/service CIDRs and node addresses

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
This commit is contained in:
Brad Davidson 2021-04-21 15:56:20 -07:00 committed by GitHub
parent ac507e530d
commit 2705431d96
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
18 changed files with 432 additions and 140 deletions

1
go.mod
View File

@ -122,5 +122,6 @@ require (
k8s.io/klog v1.0.0
k8s.io/kubectl v0.21.0
k8s.io/kubernetes v1.21.0
k8s.io/utils v0.0.0-20201110183641-67b214c5f920
sigs.k8s.io/yaml v1.2.0
)

View File

@ -27,6 +27,7 @@ import (
"github.com/rancher/k3s/pkg/clientaccess"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/control/deps"
"github.com/rancher/k3s/pkg/util"
"github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/util/json"
@ -41,7 +42,7 @@ func Get(ctx context.Context, agent cmds.Agent, proxy proxy.Proxy) *config.Node
for {
agentConfig, err := get(ctx, &agent, proxy)
if err != nil {
logrus.Errorf("Failed to retrieve agent config: %v", err)
logrus.Errorf("Failed to configure agent: %v", err)
select {
case <-time.After(5 * time.Second):
continue
@ -64,7 +65,7 @@ func Request(path string, info *clientaccess.Info, requester HTTPRequester) ([]b
return requester(u.String(), clientaccess.GetHTTPClient(info.CACerts), info.Username, info.Password)
}
func getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile string) HTTPRequester {
func getNodeNamedCrt(nodeName string, nodeIPs []sysnet.IP, nodePasswordFile string) HTTPRequester {
return func(u string, client *http.Client, username, password string) ([]byte, error) {
req, err := http.NewRequest(http.MethodGet, u, nil)
if err != nil {
@ -81,7 +82,7 @@ func getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile string) HTTPRequester {
return nil, err
}
req.Header.Set(version.Program+"-Node-Password", nodePassword)
req.Header.Set(version.Program+"-Node-IP", nodeIP)
req.Header.Set(version.Program+"-Node-IP", util.JoinIPs(nodeIPs))
resp, err := client.Do(req)
if err != nil {
@ -144,8 +145,8 @@ func upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile string)
}
}
func getServingCert(nodeName, nodeIP, servingCertFile, servingKeyFile, nodePasswordFile string, info *clientaccess.Info) (*tls.Certificate, error) {
servingCert, err := Request("/v1-"+version.Program+"/serving-kubelet.crt", info, getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile))
func getServingCert(nodeName string, nodeIPs []sysnet.IP, servingCertFile, servingKeyFile, nodePasswordFile string, info *clientaccess.Info) (*tls.Certificate, error) {
servingCert, err := Request("/v1-"+version.Program+"/serving-kubelet.crt", info, getNodeNamedCrt(nodeName, nodeIPs, nodePasswordFile))
if err != nil {
return nil, err
}
@ -207,9 +208,9 @@ func splitCertKeyPEM(bytes []byte) (certPem []byte, keyPem []byte) {
return
}
func getNodeNamedHostFile(filename, keyFile, nodeName, nodeIP, nodePasswordFile string, info *clientaccess.Info) error {
func getNodeNamedHostFile(filename, keyFile, nodeName string, nodeIPs []sysnet.IP, nodePasswordFile string, info *clientaccess.Info) error {
basename := filepath.Base(filename)
fileBytes, err := Request("/v1-"+version.Program+"/"+basename, info, getNodeNamedCrt(nodeName, nodeIP, nodePasswordFile))
fileBytes, err := Request("/v1-"+version.Program+"/"+basename, info, getNodeNamedCrt(nodeName, nodeIPs, nodePasswordFile))
if err != nil {
return err
}
@ -224,21 +225,31 @@ func getNodeNamedHostFile(filename, keyFile, nodeName, nodeIP, nodePasswordFile
return nil
}
func getHostnameAndIP(info cmds.Agent) (string, string, error) {
ip := info.NodeIP
if ip == "" {
func getHostnameAndIPs(info cmds.Agent) (string, []sysnet.IP, error) {
ips := []sysnet.IP{}
if len(info.NodeIP) == 0 {
hostIP, err := net.ChooseHostInterface()
if err != nil {
return "", "", err
return "", nil, err
}
ips = append(ips, hostIP)
} else {
for _, hostIP := range info.NodeIP {
for _, v := range strings.Split(hostIP, ",") {
ip := sysnet.ParseIP(v)
if ip == nil {
return "", nil, fmt.Errorf("invalid node-ip %s", v)
}
ips = append(ips, ip)
}
}
ip = hostIP.String()
}
name := info.NodeName
if name == "" {
hostname, err := os.Hostname()
if err != nil {
return "", "", err
return "", nil, err
}
name = hostname
}
@ -247,7 +258,7 @@ func getHostnameAndIP(info cmds.Agent) (string, string, error) {
// https://github.com/kubernetes/kubernetes/issues/71140
name = strings.ToLower(name)
return name, ip, nil
return name, ips, nil
}
func isValidResolvConf(resolvConfFile string) bool {
@ -305,7 +316,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
controlConfig, err := getConfig(info)
if err != nil {
return nil, err
return nil, errors.Wrap(err, "failed to retrieve configuration from server")
}
// If the supervisor and externally-facing apiserver are not on the same port, tell the proxy where to find the apiserver.
@ -349,7 +360,7 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
newNodePasswordFile := filepath.Join(nodeConfigPath, "password")
upgradeOldNodePasswordPath(oldNodePasswordFile, newNodePasswordFile)
nodeName, nodeIP, err := getHostnameAndIP(*envInfo)
nodeName, nodeIPs, err := getHostnameAndIPs(*envInfo)
if err != nil {
return nil, err
}
@ -364,14 +375,14 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
os.Setenv("NODE_NAME", nodeName)
servingCert, err := getServingCert(nodeName, nodeIP, servingKubeletCert, servingKubeletKey, newNodePasswordFile, info)
servingCert, err := getServingCert(nodeName, nodeIPs, servingKubeletCert, servingKubeletKey, newNodePasswordFile, info)
if err != nil {
return nil, err
}
clientKubeletCert := filepath.Join(envInfo.DataDir, "agent", "client-kubelet.crt")
clientKubeletKey := filepath.Join(envInfo.DataDir, "agent", "client-kubelet.key")
if err := getNodeNamedHostFile(clientKubeletCert, clientKubeletKey, nodeName, nodeIP, newNodePasswordFile, info); err != nil {
if err := getNodeNamedHostFile(clientKubeletCert, clientKubeletKey, nodeName, nodeIPs, newNodePasswordFile, info); err != nil {
return nil, err
}
@ -411,10 +422,8 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
}
nodeConfig.FlannelIface = flannelIface
nodeConfig.Images = filepath.Join(envInfo.DataDir, "agent", "images")
nodeConfig.AgentConfig.NodeIP = nodeIP
nodeConfig.AgentConfig.NodeName = nodeName
nodeConfig.AgentConfig.NodeConfigPath = nodeConfigPath
nodeConfig.AgentConfig.NodeExternalIP = envInfo.NodeExternalIP
nodeConfig.AgentConfig.ServingKubeletCert = servingKubeletCert
nodeConfig.AgentConfig.ServingKubeletKey = servingKubeletKey
nodeConfig.AgentConfig.ClusterDNS = controlConfig.ClusterDNS
@ -458,6 +467,32 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
nodeConfig.Containerd.Template = filepath.Join(envInfo.DataDir, "agent", "etc", "containerd", "config.toml.tmpl")
nodeConfig.Certificate = servingCert
nodeConfig.AgentConfig.NodeIPs = nodeIPs
nodeIP, err := util.GetFirst4(nodeIPs)
if err != nil {
return nil, errors.Wrap(err, "cannot configure IPv4 node-ip")
}
nodeConfig.AgentConfig.NodeIP = nodeIP.String()
for _, externalIP := range envInfo.NodeExternalIP {
for _, v := range strings.Split(externalIP, ",") {
ip := sysnet.ParseIP(v)
if ip == nil {
return nil, fmt.Errorf("invalid node-external-ip %s", v)
}
nodeConfig.AgentConfig.NodeExternalIPs = append(nodeConfig.AgentConfig.NodeExternalIPs, ip)
}
}
// if configured, set NodeExternalIP to the first IPv4 address, for legacy clients
if len(nodeConfig.AgentConfig.NodeExternalIPs) > 0 {
nodeExternalIP, err := util.GetFirst4(nodeConfig.AgentConfig.NodeExternalIPs)
if err != nil {
return nil, errors.Wrap(err, "cannot configure IPv4 node-external-ip")
}
nodeConfig.AgentConfig.NodeExternalIP = nodeExternalIP.String()
}
if nodeConfig.FlannelBackend == config.FlannelBackendNone {
nodeConfig.NoFlannel = true
} else {
@ -488,27 +523,35 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
}
if controlConfig.ClusterIPRange != nil {
nodeConfig.AgentConfig.ClusterCIDR = *controlConfig.ClusterIPRange
nodeConfig.AgentConfig.ClusterCIDR = controlConfig.ClusterIPRange
nodeConfig.AgentConfig.ClusterCIDRs = []*sysnet.IPNet{controlConfig.ClusterIPRange}
}
if len(controlConfig.ClusterIPRanges) > 0 {
nodeConfig.AgentConfig.ClusterCIDRs = controlConfig.ClusterIPRanges
}
if controlConfig.ServiceIPRange != nil {
nodeConfig.AgentConfig.ServiceCIDR = *controlConfig.ServiceIPRange
nodeConfig.AgentConfig.ServiceCIDR = controlConfig.ServiceIPRange
nodeConfig.AgentConfig.ServiceCIDRs = []*sysnet.IPNet{controlConfig.ServiceIPRange}
}
if len(controlConfig.ServiceIPRanges) > 0 {
nodeConfig.AgentConfig.ServiceCIDRs = controlConfig.ServiceIPRanges
}
if controlConfig.ServiceNodePortRange != nil {
nodeConfig.AgentConfig.ServiceNodePortRange = *controlConfig.ServiceNodePortRange
}
// Old versions of the server do not send enough information to correctly start the NPC. Users
// need to upgrade the server to at least the same version as the agent, or disable the NPC
// cluster-wide.
if controlConfig.DisableNPC == false && (controlConfig.ServiceIPRange == nil || controlConfig.ServiceNodePortRange == nil) {
return nil, fmt.Errorf("incompatible down-level server detected; servers must be upgraded to at least %s, or restarted with --disable-network-policy", version.Version)
if len(controlConfig.ClusterDNSs) == 0 {
nodeConfig.AgentConfig.ClusterDNSs = []sysnet.IP{controlConfig.ClusterDNS}
} else {
nodeConfig.AgentConfig.ClusterDNSs = controlConfig.ClusterDNSs
}
nodeConfig.AgentConfig.ExtraKubeletArgs = envInfo.ExtraKubeletArgs
nodeConfig.AgentConfig.ExtraKubeProxyArgs = envInfo.ExtraKubeProxyArgs
nodeConfig.AgentConfig.NodeTaints = envInfo.Taints
nodeConfig.AgentConfig.NodeLabels = envInfo.Labels
nodeConfig.AgentConfig.PrivateRegistry = envInfo.PrivateRegistry
@ -520,6 +563,10 @@ func get(ctx context.Context, envInfo *cmds.Agent, proxy proxy.Proxy) (*config.N
nodeConfig.AgentConfig.PodManifests = filepath.Join(envInfo.DataDir, "agent", DefaultPodManifestPath)
nodeConfig.AgentConfig.ProtectKernelDefaults = envInfo.ProtectKernelDefaults
if err := validateNetworkConfig(nodeConfig); err != nil {
return nil, err
}
return nodeConfig, nil
}
@ -532,3 +579,15 @@ func getConfig(info *clientaccess.Info) (*config.Control, error) {
controlControl := &config.Control{}
return controlControl, json.Unmarshal(data, controlControl)
}
// validateNetworkConfig ensures that the network configuration values provided
// by the server make sense. It returns an error when the agent cannot safely
// start the network policy controller (NPC) with the configuration received.
func validateNetworkConfig(nodeConfig *config.Node) error {
	// Old versions of the server do not send enough information to correctly start the NPC. Users
	// need to upgrade the server to at least the same version as the agent, or disable the NPC
	// cluster-wide.
	if !nodeConfig.AgentConfig.DisableNPC && (nodeConfig.AgentConfig.ServiceCIDR == nil || nodeConfig.AgentConfig.ServiceNodePortRange.Size == 0) {
		return fmt.Errorf("incompatible down-level server detected; servers must be upgraded to at least %s, or restarted with --disable-network-policy", version.Version)
	}
	return nil
}

View File

@ -592,7 +592,7 @@ func NewNetworkPolicyController(clientset kubernetes.Interface,
// be up to date with all of the policy changes from any enqueued request after that
npc.fullSyncRequestChan = make(chan struct{}, 1)
npc.serviceClusterIPRange = config.AgentConfig.ServiceCIDR
npc.serviceClusterIPRange = *config.AgentConfig.ServiceCIDR
npc.serviceNodePortRange = strings.ReplaceAll(config.AgentConfig.ServiceNodePortRange.String(), "-", ":")
npc.syncPeriod = defaultSyncPeriod

View File

@ -253,15 +253,17 @@ func testForMissingOrUnwanted(t *testing.T, targetMsg string, got []podInfo, wan
}
}
func newMinimalNodeConfig(clusterIPCIDR string, nodePortRange string, hostNameOverride string, externalIPs []string) *config.Node {
func newMinimalNodeConfig(serviceIPCIDR string, nodePortRange string, hostNameOverride string, externalIPs []string) *config.Node {
nodeConfig := &config.Node{AgentConfig: config.Agent{}}
if clusterIPCIDR != "" {
_, cidr, err := net.ParseCIDR(clusterIPCIDR)
if serviceIPCIDR != "" {
_, cidr, err := net.ParseCIDR(serviceIPCIDR)
if err != nil {
panic("failed to get parse --service-cluster-ip-range parameter: " + err.Error())
}
nodeConfig.AgentConfig.ClusterCIDR = *cidr
nodeConfig.AgentConfig.ServiceCIDR = cidr
} else {
nodeConfig.AgentConfig.ServiceCIDR = &net.IPNet{}
}
if nodePortRange != "" {
portRange, err := utilnet.ParsePortRange(nodePortRange)

View File

@ -2,7 +2,6 @@ package agent
import (
"context"
"errors"
"fmt"
"io/ioutil"
"net/url"
@ -14,6 +13,7 @@ import (
"github.com/containerd/cgroups"
cgroupsv2 "github.com/containerd/cgroups/v2"
systemd "github.com/coreos/go-systemd/daemon"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/agent/config"
"github.com/rancher/k3s/pkg/agent/containerd"
"github.com/rancher/k3s/pkg/agent/flannel"
@ -23,11 +23,12 @@ import (
"github.com/rancher/k3s/pkg/agent/tunnel"
"github.com/rancher/k3s/pkg/cli/cmds"
"github.com/rancher/k3s/pkg/clientaccess"
cp "github.com/rancher/k3s/pkg/cloudprovider"
"github.com/rancher/k3s/pkg/daemons/agent"
daemonconfig "github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/nodeconfig"
"github.com/rancher/k3s/pkg/rootless"
"github.com/rancher/k3s/pkg/version"
"github.com/rancher/k3s/pkg/util"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -36,12 +37,7 @@ import (
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/controller-manager/app"
)
var (
InternalIPLabel = version.Program + ".io/internal-ip"
ExternalIPLabel = version.Program + ".io/external-ip"
HostnameLabel = version.Program + ".io/hostname"
utilsnet "k8s.io/utils/net"
)
const (
@ -76,6 +72,21 @@ func setupCriCtlConfig(cfg cmds.Agent, nodeConfig *daemonconfig.Node) error {
func run(ctx context.Context, cfg cmds.Agent, proxy proxy.Proxy) error {
nodeConfig := config.Get(ctx, cfg, proxy)
dualCluster, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ClusterCIDRs)
if err != nil {
return errors.Wrap(err, "failed to validate cluster-cidr")
}
dualService, err := utilsnet.IsDualStackCIDRs(nodeConfig.AgentConfig.ServiceCIDRs)
if err != nil {
return errors.Wrap(err, "failed to validate service-cidr")
}
dualNode, err := utilsnet.IsDualStackIPs(nodeConfig.AgentConfig.NodeIPs)
if err != nil {
return errors.Wrap(err, "failed to validate node-ip")
}
syssetup.Configure(dualCluster || dualService || dualNode)
if err := setupCriCtlConfig(cfg, nodeConfig); err != nil {
return err
}
@ -135,7 +146,6 @@ func Run(ctx context.Context, cfg cmds.Agent) error {
if err := validate(); err != nil {
return err
}
syssetup.Configure()
if cfg.Rootless && !cfg.RootlessAlreadyUnshared {
if err := rootless.Rootless(cfg.DataDir); err != nil {
@ -231,22 +241,30 @@ func configureNode(ctx context.Context, agentConfig *daemonconfig.Agent, nodes v
continue
}
newLabels, updateMutables := updateMutableLabels(agentConfig, node.Labels)
updateNode := false
if labels, changed := updateMutableLabels(agentConfig, node.Labels); changed {
node.Labels = labels
updateNode = true
}
updateAddresses := !agentConfig.DisableCCM
if updateAddresses {
newLabels, updateAddresses = updateAddressLabels(agentConfig, newLabels)
if !agentConfig.DisableCCM {
if annotations, changed := updateAddressAnnotations(agentConfig, node.Annotations); changed {
node.Annotations = annotations
updateNode = true
}
if labels, changed := updateLegacyAddressLabels(agentConfig, node.Labels); changed {
node.Labels = labels
updateNode = true
}
}
// inject node config
updateNode, err := nodeconfig.SetNodeConfigAnnotations(node)
if err != nil {
if changed, err := nodeconfig.SetNodeConfigAnnotations(node); err != nil {
return err
}
if updateAddresses || updateMutables {
node.Labels = newLabels
} else if changed {
updateNode = true
}
if updateNode {
if _, err := nodes.Update(ctx, node, metav1.UpdateOptions{}); err != nil {
logrus.Infof("Failed to update node %s: %v", agentConfig.NodeName, err)
@ -286,18 +304,36 @@ func updateMutableLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]
return result, !equality.Semantic.DeepEqual(nodeLabels, result)
}
func updateAddressLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]string) (map[string]string, bool) {
func updateLegacyAddressLabels(agentConfig *daemonconfig.Agent, nodeLabels map[string]string) (map[string]string, bool) {
ls := labels.Set(nodeLabels)
if ls.Has(cp.InternalIPKey) || ls.Has(cp.HostnameKey) {
result := map[string]string{
cp.InternalIPKey: agentConfig.NodeIP,
cp.HostnameKey: agentConfig.NodeName,
}
if agentConfig.NodeExternalIP != "" {
result[cp.ExternalIPKey] = agentConfig.NodeExternalIP
}
result = labels.Merge(nodeLabels, result)
return result, !equality.Semantic.DeepEqual(nodeLabels, result)
}
return nil, false
}
func updateAddressAnnotations(agentConfig *daemonconfig.Agent, nodeAnnotations map[string]string) (map[string]string, bool) {
result := map[string]string{
InternalIPLabel: agentConfig.NodeIP,
HostnameLabel: agentConfig.NodeName,
cp.InternalIPKey: util.JoinIPs(agentConfig.NodeIPs),
cp.HostnameKey: agentConfig.NodeName,
}
if agentConfig.NodeExternalIP != "" {
result[ExternalIPLabel] = agentConfig.NodeExternalIP
result[cp.ExternalIPKey] = util.JoinIPs(agentConfig.NodeExternalIPs)
}
result = labels.Merge(nodeLabels, result)
return result, !equality.Semantic.DeepEqual(nodeLabels, result)
result = labels.Merge(nodeAnnotations, result)
return result, !equality.Semantic.DeepEqual(nodeAnnotations, result)
}
// setupTunnelAndRunAgent should start the setup tunnel before starting kubelet and kubeproxy

View File

@ -27,19 +27,24 @@ func enableSystemControl(file string) {
}
}
func Configure() {
func Configure(enableIPv6 bool) {
loadKernelModule("overlay")
loadKernelModule("nf_conntrack")
loadKernelModule("br_netfilter")
loadKernelModule("iptable_nat")
if enableIPv6 {
loadKernelModule("ip6table_nat")
}
// Kernel is inconsistent about how devconf is configured for
// new network namespaces between ipv4 and ipv6. Make sure to
// enable forwarding on all and default for both ipv4 and ipv8.
// enable forwarding on all and default for both ipv4 and ipv6.
enableSystemControl("/proc/sys/net/ipv4/conf/all/forwarding")
enableSystemControl("/proc/sys/net/ipv4/conf/default/forwarding")
enableSystemControl("/proc/sys/net/ipv6/conf/all/forwarding")
enableSystemControl("/proc/sys/net/ipv6/conf/default/forwarding")
enableSystemControl("/proc/sys/net/bridge/bridge-nf-call-iptables")
enableSystemControl("/proc/sys/net/bridge/bridge-nf-call-ip6tables")
if enableIPv6 {
enableSystemControl("/proc/sys/net/ipv6/conf/all/forwarding")
enableSystemControl("/proc/sys/net/ipv6/conf/default/forwarding")
enableSystemControl("/proc/sys/net/bridge/bridge-nf-call-ip6tables")
}
}

View File

@ -50,8 +50,8 @@ func Run(ctx *cli.Context) error {
return fmt.Errorf("--server is required")
}
if cmds.AgentConfig.FlannelIface != "" && cmds.AgentConfig.NodeIP == "" {
cmds.AgentConfig.NodeIP = netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface)
if cmds.AgentConfig.FlannelIface != "" && len(cmds.AgentConfig.NodeIP) == 0 {
cmds.AgentConfig.NodeIP.Set(netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface))
}
logrus.Info("Starting " + version.Program + " agent " + ctx.App.Version)

View File

@ -20,8 +20,8 @@ type Agent struct {
LBServerPort int
ResolvConf string
DataDir string
NodeIP string
NodeExternalIP string
NodeIP cli.StringSlice
NodeExternalIP cli.StringSlice
NodeName string
PauseImage string
Snapshotter string
@ -52,15 +52,15 @@ type AgentShared struct {
var (
appName = filepath.Base(os.Args[0])
AgentConfig Agent
NodeIPFlag = cli.StringFlag{
Name: "node-ip,i",
Usage: "(agent/networking) IP address to advertise for node",
Destination: &AgentConfig.NodeIP,
NodeIPFlag = cli.StringSliceFlag{
Name: "node-ip,i",
Usage: "(agent/networking) IPv4/IPv6 addresses to advertise for node",
Value: &AgentConfig.NodeIP,
}
NodeExternalIPFlag = cli.StringFlag{
Name: "node-external-ip",
Usage: "(agent/networking) External IP address to advertise for node",
Destination: &AgentConfig.NodeExternalIP,
NodeExternalIPFlag = cli.StringSliceFlag{
Name: "node-external-ip",
Usage: "(agent/networking) IPv4/IPv6 external IP addresses to advertise for node",
Value: &AgentConfig.NodeExternalIP,
}
NodeNameFlag = cli.StringFlag{
Name: "node-name",

View File

@ -13,15 +13,15 @@ const (
)
type Server struct {
ClusterCIDR string
ClusterCIDR cli.StringSlice
AgentToken string
AgentTokenFile string
Token string
TokenFile string
ClusterSecret string
ServiceCIDR string
ServiceCIDR cli.StringSlice
ServiceNodePortRange string
ClusterDNS string
ClusterDNS cli.StringSlice
ClusterDomain string
// The port which kubectl clients can access k8s
HTTPSPort int
@ -108,7 +108,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
},
cli.StringFlag{
Name: "advertise-address",
Usage: "(listener) IP address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip)",
Usage: "(listener) IPv4 address that apiserver uses to advertise to members of the cluster (default: node-external-ip/node-ip)",
Destination: &ServerConfig.AdvertiseIP,
},
cli.IntFlag{
@ -118,7 +118,7 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
},
cli.StringSliceFlag{
Name: "tls-san",
Usage: "(listener) Add additional hostname or IP as a Subject Alternative Name in the TLS cert",
Usage: "(listener) Add additional hostnames or IPv4/IPv6 addresses as Subject Alternative Names on the server TLS cert",
Value: &ServerConfig.TLSSan,
},
cli.StringFlag{
@ -126,17 +126,15 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
Usage: "(data) Folder to hold state default /var/lib/rancher/" + version.Program + " or ${HOME}/.rancher/" + version.Program + " if not root",
Destination: &ServerConfig.DataDir,
},
cli.StringFlag{
Name: "cluster-cidr",
Usage: "(networking) Network CIDR to use for pod IPs",
Destination: &ServerConfig.ClusterCIDR,
Value: "10.42.0.0/16",
cli.StringSliceFlag{
Name: "cluster-cidr",
Usage: "(networking) IPv4/IPv6 network CIDRs to use for pod IPs (default: 10.42.0.0/16)",
Value: &ServerConfig.ClusterCIDR,
},
cli.StringFlag{
Name: "service-cidr",
Usage: "(networking) Network CIDR to use for services IPs",
Destination: &ServerConfig.ServiceCIDR,
Value: "10.43.0.0/16",
cli.StringSliceFlag{
Name: "service-cidr",
Usage: "(networking) IPv4/IPv6 network CIDRs to use for service IPs (default: 10.43.0.0/16)",
Value: &ServerConfig.ServiceCIDR,
},
cli.StringFlag{
Name: "service-node-port-range",
@ -144,11 +142,10 @@ func NewServerCommand(action func(*cli.Context) error) cli.Command {
Destination: &ServerConfig.ServiceNodePortRange,
Value: "30000-32767",
},
cli.StringFlag{
Name: "cluster-dns",
Usage: "(networking) Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10)",
Destination: &ServerConfig.ClusterDNS,
Value: "",
cli.StringSliceFlag{
Name: "cluster-dns",
Usage: "(networking) IPv4 Cluster IP for coredns service. Should be in your service-cidr range (default: 10.43.0.10)",
Value: &ServerConfig.ClusterDNS,
},
cli.StringFlag{
Name: "cluster-domain",

View File

@ -20,6 +20,7 @@ import (
"github.com/rancher/k3s/pkg/rootless"
"github.com/rancher/k3s/pkg/server"
"github.com/rancher/k3s/pkg/token"
"github.com/rancher/k3s/pkg/util"
"github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler/pkg/signals"
"github.com/sirupsen/logrus"
@ -27,6 +28,7 @@ import (
utilnet "k8s.io/apimachinery/pkg/util/net"
kubeapiserverflag "k8s.io/component-base/cli/flag"
"k8s.io/kubernetes/pkg/controlplane"
utilsnet "k8s.io/utils/net"
_ "github.com/go-sql-driver/mysql" // ensure we have mysql
_ "github.com/lib/pq" // ensure we have postgres
@ -140,7 +142,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
serverConfig.ControlConfig.EtcdS3Folder = cfg.EtcdS3Folder
if cfg.ClusterResetRestorePath != "" && !cfg.ClusterReset {
return errors.New("invalid flag use. --cluster-reset required with --cluster-reset-restore-path")
return errors.New("invalid flag use; --cluster-reset required with --cluster-reset-restore-path")
}
// make sure components are disabled so we only perform a restore
@ -161,7 +163,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
}
if serverConfig.ControlConfig.DisableETCD && serverConfig.ControlConfig.JoinURL == "" {
return errors.New("invalid flag use. --server required with --disable-etcd")
return errors.New("invalid flag use; --server is required with --disable-etcd")
}
if serverConfig.ControlConfig.DisableAPIServer {
@ -174,50 +176,116 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
}
}
if cmds.AgentConfig.FlannelIface != "" && cmds.AgentConfig.NodeIP == "" {
cmds.AgentConfig.NodeIP = netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface)
if cmds.AgentConfig.FlannelIface != "" && len(cmds.AgentConfig.NodeIP) == 0 {
cmds.AgentConfig.NodeIP.Set(netutil.GetIPFromInterface(cmds.AgentConfig.FlannelIface))
}
if serverConfig.ControlConfig.PrivateIP == "" && cmds.AgentConfig.NodeIP != "" {
serverConfig.ControlConfig.PrivateIP = cmds.AgentConfig.NodeIP
if serverConfig.ControlConfig.PrivateIP == "" && len(cmds.AgentConfig.NodeIP) != 0 {
// ignoring the error here is fine since etcd will fall back to the interface's IPv4 address
serverConfig.ControlConfig.PrivateIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeIP)
}
if serverConfig.ControlConfig.AdvertiseIP == "" && cmds.AgentConfig.NodeExternalIP != "" {
serverConfig.ControlConfig.AdvertiseIP = cmds.AgentConfig.NodeExternalIP
// if not set, try setting advertise-ip from agent node-external-ip
if serverConfig.ControlConfig.AdvertiseIP == "" && len(cmds.AgentConfig.NodeExternalIP) != 0 {
serverConfig.ControlConfig.AdvertiseIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeExternalIP)
}
if serverConfig.ControlConfig.AdvertiseIP == "" && cmds.AgentConfig.NodeIP != "" {
serverConfig.ControlConfig.AdvertiseIP = cmds.AgentConfig.NodeIP
// if not set, try setting advertise-ip from agent node-ip
if serverConfig.ControlConfig.AdvertiseIP == "" && len(cmds.AgentConfig.NodeIP) != 0 {
serverConfig.ControlConfig.AdvertiseIP, _ = util.GetFirst4String(cmds.AgentConfig.NodeIP)
}
// if we ended up with any advertise-ips, ensure they're added to the SAN list;
// note that kube-apiserver does not support dual-stack advertise-ip as of 1.21.0:
// https://github.com/kubernetes/kubeadm/issues/1612#issuecomment-772583989
if serverConfig.ControlConfig.AdvertiseIP != "" {
serverConfig.ControlConfig.SANs = append(serverConfig.ControlConfig.SANs, serverConfig.ControlConfig.AdvertiseIP)
}
_, serverConfig.ControlConfig.ClusterIPRange, err = net.ParseCIDR(cfg.ClusterCIDR)
if err != nil {
return errors.Wrapf(err, "Invalid CIDR %s: %v", cfg.ClusterCIDR, err)
// configure ClusterIPRanges
if len(cmds.ServerConfig.ClusterCIDR) == 0 {
cmds.ServerConfig.ClusterCIDR.Set("10.42.0.0/16")
}
_, serverConfig.ControlConfig.ServiceIPRange, err = net.ParseCIDR(cfg.ServiceCIDR)
if err != nil {
return errors.Wrapf(err, "Invalid CIDR %s: %v", cfg.ServiceCIDR, err)
for _, cidr := range cmds.ServerConfig.ClusterCIDR {
for _, v := range strings.Split(cidr, ",") {
_, parsed, err := net.ParseCIDR(v)
if err != nil {
return errors.Wrapf(err, "invalid cluster-cidr %s", v)
}
serverConfig.ControlConfig.ClusterIPRanges = append(serverConfig.ControlConfig.ClusterIPRanges, parsed)
}
}
// set ClusterIPRange to the first IPv4 block, for legacy clients
clusterIPRange, err := util.GetFirst4Net(serverConfig.ControlConfig.ClusterIPRanges)
if err != nil {
return errors.Wrap(err, "cannot configure IPv4 cluster-cidr")
}
serverConfig.ControlConfig.ClusterIPRange = clusterIPRange
// configure ServiceIPRanges
if len(cmds.ServerConfig.ServiceCIDR) == 0 {
cmds.ServerConfig.ServiceCIDR.Set("10.43.0.0/16")
}
for _, cidr := range cmds.ServerConfig.ServiceCIDR {
for _, v := range strings.Split(cidr, ",") {
_, parsed, err := net.ParseCIDR(v)
if err != nil {
return errors.Wrapf(err, "invalid service-cidr %s", v)
}
serverConfig.ControlConfig.ServiceIPRanges = append(serverConfig.ControlConfig.ServiceIPRanges, parsed)
}
}
// set ServiceIPRange to the first IPv4 block, for legacy clients
serviceIPRange, err := util.GetFirst4Net(serverConfig.ControlConfig.ServiceIPRanges)
if err != nil {
return errors.Wrap(err, "cannot configure IPv4 service-cidr")
}
serverConfig.ControlConfig.ServiceIPRange = serviceIPRange
serverConfig.ControlConfig.ServiceNodePortRange, err = utilnet.ParsePortRange(cfg.ServiceNodePortRange)
if err != nil {
return errors.Wrapf(err, "Invalid port range %s: %v", cfg.ServiceNodePortRange, err)
return errors.Wrapf(err, "invalid port range %s", cfg.ServiceNodePortRange)
}
// the apiserver service does not yet support dual-stack operation
_, apiServerServiceIP, err := controlplane.ServiceIPRange(*serverConfig.ControlConfig.ServiceIPRange)
if err != nil {
return err
}
serverConfig.ControlConfig.SANs = append(serverConfig.ControlConfig.SANs, apiServerServiceIP.String())
// If cluster-dns CLI arg is not set, we set ClusterDNS address to be ServiceCIDR network + 10,
// If cluster-dns CLI arg is not set, we set ClusterDNS address to be the first IPv4 ServiceCIDR network + 10,
// i.e. when you set service-cidr to 192.168.0.0/16 and don't provide cluster-dns, it will be set to 192.168.0.10
if cfg.ClusterDNS == "" {
serverConfig.ControlConfig.ClusterDNS = make(net.IP, 4)
copy(serverConfig.ControlConfig.ClusterDNS, serverConfig.ControlConfig.ServiceIPRange.IP.To4())
serverConfig.ControlConfig.ClusterDNS[3] = 10
// If there are no IPv4 ServiceCIDRs, an error will be raised.
if len(cmds.ServerConfig.ClusterDNS) == 0 {
clusterDNS, err := utilsnet.GetIndexedIP(serverConfig.ControlConfig.ServiceIPRange, 10)
if err != nil {
return errors.Wrap(err, "cannot configure default cluster-dns address")
}
serverConfig.ControlConfig.ClusterDNS = clusterDNS
serverConfig.ControlConfig.ClusterDNSs = []net.IP{serverConfig.ControlConfig.ClusterDNS}
} else {
serverConfig.ControlConfig.ClusterDNS = net.ParseIP(cfg.ClusterDNS)
for _, ip := range cmds.ServerConfig.ClusterDNS {
for _, v := range strings.Split(ip, ",") {
parsed := net.ParseIP(v)
if parsed == nil {
return fmt.Errorf("invalid cluster-dns address %s", v)
}
serverConfig.ControlConfig.ClusterDNSs = append(serverConfig.ControlConfig.ClusterDNSs, parsed)
}
}
// Set ClusterDNS to the first IPv4 address, for legacy clients
clusterDNS, err := util.GetFirst4(serverConfig.ControlConfig.ClusterDNSs)
if err != nil {
return errors.Wrap(err, "cannot configure IPv4 cluster-dns address")
}
serverConfig.ControlConfig.ClusterDNS = clusterDNS
}
if err := validateNetworkConfiguration(serverConfig); err != nil {
return err
}
if cfg.DefaultLocalStoragePath == "" {
@ -257,7 +325,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
tlsMinVersionArg := getArgValueFromList("tls-min-version", cfg.ExtraAPIArgs)
serverConfig.ControlConfig.TLSMinVersion, err = kubeapiserverflag.TLSVersion(tlsMinVersionArg)
if err != nil {
return errors.Wrap(err, "Invalid tls-min-version")
return errors.Wrap(err, "invalid tls-min-version")
}
serverConfig.StartupHooks = append(serverConfig.StartupHooks, cfg.StartupHooks...)
@ -285,7 +353,7 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
}
serverConfig.ControlConfig.TLSCipherSuites, err = kubeapiserverflag.TLSCipherSuites(tlsCipherSuites)
if err != nil {
return errors.Wrap(err, "Invalid tls-cipher-suites")
return errors.Wrap(err, "invalid tls-cipher-suites")
}
logrus.Info("Starting " + version.Program + " " + app.App.Version)
@ -353,6 +421,34 @@ func run(app *cli.Context, cfg *cmds.Server, leaderControllers server.CustomCont
return agent.Run(ctx, agentConfig)
}
// validateNetworkConfiguration ensures that the network configuration values make sense.
func validateNetworkConfiguration(serverConfig server.Config) error {
	// Dual-stack operation requires fairly extensive manual configuration at the moment - do some
	// preflight checks to make sure that the user isn't trying to use flannel/npc, or trying to
	// enable dual-stack DNS (which we don't currently support since it's not easy to template)
	dualCluster, err := utilsnet.IsDualStackCIDRs(serverConfig.ControlConfig.ClusterIPRanges)
	if err != nil {
		return errors.Wrap(err, "failed to validate cluster-cidr")
	}
	dualService, err := utilsnet.IsDualStackCIDRs(serverConfig.ControlConfig.ServiceIPRanges)
	if err != nil {
		return errors.Wrap(err, "failed to validate service-cidr")
	}
	dualDNS, err := utilsnet.IsDualStackIPs(serverConfig.ControlConfig.ClusterDNSs)
	if err != nil {
		return errors.Wrap(err, "failed to validate cluster-dns")
	}

	// Flannel and the bundled network policy controller are IPv4-only; reject dual-stack
	// pod/service CIDRs unless both are disabled so an external CNI can take over.
	if (dualCluster || dualService) && (serverConfig.ControlConfig.FlannelBackend != "none" || !serverConfig.ControlConfig.DisableNPC) {
		return errors.New("flannel CNI and network policy enforcement are not compatible with dual-stack operation; server must be restarted with --flannel-backend=none --disable-network-policy and an alternative CNI plugin deployed")
	}

	// Dual-stack cluster-dns is not templated into CoreDNS yet, so only a single family is allowed.
	if dualDNS {
		return errors.New("dual-stack cluster-dns is not supported")
	}

	return nil
}
func knownIPs(ips []string) []string {
ips = append(ips, "127.0.0.1")
ip, err := utilnet.ChooseHostInterface()

View File

@ -3,6 +3,7 @@ package cloudprovider
import (
"context"
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/rancher/k3s/pkg/version"
@ -13,9 +14,9 @@ import (
)
var (
InternalIPLabel = version.Program + ".io/internal-ip"
ExternalIPLabel = version.Program + ".io/external-ip"
HostnameLabel = version.Program + ".io/hostname"
InternalIPKey = version.Program + ".io/internal-ip"
ExternalIPKey = version.Program + ".io/external-ip"
HostnameKey = version.Program + ".io/hostname"
)
func (k *k3s) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
@ -69,22 +70,32 @@ func (k *k3s) NodeAddresses(ctx context.Context, name types.NodeName) ([]corev1.
return nil, fmt.Errorf("Failed to find node %s: %v", name, err)
}
// check internal address
if node.Labels[InternalIPLabel] != "" {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: node.Labels[InternalIPLabel]})
if address := node.Annotations[InternalIPKey]; address != "" {
for _, v := range strings.Split(address, ",") {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: v})
}
} else if address = node.Labels[InternalIPKey]; address != "" {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: address})
} else {
logrus.Infof("Couldn't find node internal ip label on node %s", name)
logrus.Infof("Couldn't find node internal ip annotation or label on node %s", name)
}
// check external address
if node.Labels[ExternalIPLabel] != "" {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: node.Labels[ExternalIPLabel]})
if address := node.Annotations[ExternalIPKey]; address != "" {
for _, v := range strings.Split(address, ",") {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: v})
}
} else if address = node.Labels[ExternalIPKey]; address != "" {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: address})
}
// check hostname
if node.Labels[HostnameLabel] != "" {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: node.Labels[HostnameLabel]})
if address := node.Annotations[HostnameKey]; address != "" {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: address})
} else if address = node.Labels[HostnameKey]; address != "" {
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: address})
} else {
logrus.Infof("Couldn't find node hostname label on node %s", name)
logrus.Infof("Couldn't find node hostname annotation or label on node %s", name)
}
return addresses, nil

View File

@ -13,6 +13,7 @@ import (
"github.com/opencontainers/runc/libcontainer/system"
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/executor"
"github.com/rancher/k3s/pkg/util"
"github.com/rancher/k3s/pkg/version"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
@ -47,7 +48,7 @@ func startKubeProxy(cfg *config.Agent) error {
"proxy-mode": "iptables",
"healthz-bind-address": "127.0.0.1",
"kubeconfig": cfg.KubeConfigKubeProxy,
"cluster-cidr": cfg.ClusterCIDR.String(),
"cluster-cidr": util.JoinIPNets(cfg.ClusterCIDRs),
}
if cfg.NodeName != "" {
argsMap["hostname-override"] = cfg.NodeName
@ -94,7 +95,7 @@ func startKubelet(cfg *config.Agent) error {
argsMap["network-plugin"] = "cni"
}
if len(cfg.ClusterDNS) > 0 {
argsMap["cluster-dns"] = cfg.ClusterDNS.String()
argsMap["cluster-dns"] = util.JoinIPs(cfg.ClusterDNSs)
}
if cfg.ResolvConf != "" {
argsMap["resolv-conf"] = cfg.ResolvConf

View File

@ -58,10 +58,13 @@ type Agent struct {
NodeConfigPath string
ServingKubeletCert string
ServingKubeletKey string
ServiceCIDR net.IPNet
ServiceCIDR *net.IPNet
ServiceCIDRs []*net.IPNet
ServiceNodePortRange utilnet.PortRange
ClusterCIDR net.IPNet
ClusterCIDR *net.IPNet
ClusterCIDRs []*net.IPNet
ClusterDNS net.IP
ClusterDNSs []net.IP
ClusterDomain string
ResolvConf string
RootDir string
@ -69,7 +72,9 @@ type Agent struct {
KubeConfigKubeProxy string
KubeConfigK3sController string
NodeIP string
NodeIPs []net.IP
NodeExternalIP string
NodeExternalIPs []net.IP
RuntimeSocket string
ListenAddress string
ClientCA string
@ -106,9 +111,12 @@ type Control struct {
AgentToken string `json:"-"`
Token string `json:"-"`
ClusterIPRange *net.IPNet
ClusterIPRanges []*net.IPNet
ServiceIPRange *net.IPNet
ServiceIPRanges []*net.IPNet
ServiceNodePortRange *utilnet.PortRange
ClusterDNS net.IP
ClusterDNSs []net.IP
ClusterDomain string
NoCoreDNS bool
KubeConfigOutput string

View File

@ -16,6 +16,7 @@ import (
"github.com/rancher/k3s/pkg/daemons/config"
"github.com/rancher/k3s/pkg/daemons/control/deps"
"github.com/rancher/k3s/pkg/daemons/executor"
util2 "github.com/rancher/k3s/pkg/util"
"github.com/rancher/k3s/pkg/version"
"github.com/rancher/wrangler-api/pkg/generated/controllers/rbac"
"github.com/sirupsen/logrus"
@ -100,7 +101,7 @@ func controllerManager(cfg *config.Control, runtime *config.ControlRuntime) erro
"kubeconfig": runtime.KubeConfigController,
"service-account-private-key-file": runtime.ServiceKey,
"allocate-node-cidrs": "true",
"cluster-cidr": cfg.ClusterIPRange.String(),
"cluster-cidr": util2.JoinIPNets(cfg.ClusterIPRanges),
"root-ca-file": runtime.ServerCA,
"port": "10252",
"profiling": "false",
@ -155,7 +156,7 @@ func apiServer(ctx context.Context, cfg *config.Control, runtime *config.Control
argsMap["allow-privileged"] = "true"
argsMap["authorization-mode"] = strings.Join([]string{modes.ModeNode, modes.ModeRBAC}, ",")
argsMap["service-account-signing-key-file"] = runtime.ServiceKey
argsMap["service-cluster-ip-range"] = cfg.ServiceIPRange.String()
argsMap["service-cluster-ip-range"] = util2.JoinIPNets(cfg.ServiceIPRanges)
argsMap["service-node-port-range"] = cfg.ServiceNodePortRange.String()
argsMap["advertise-port"] = strconv.Itoa(cfg.AdvertisePort)
if cfg.AdvertiseIP != "" {
@ -360,7 +361,7 @@ func cloudControllerManager(ctx context.Context, cfg *config.Control, runtime *c
ccmOptions.KubeCloudShared.AllocateNodeCIDRs = true
ccmOptions.KubeCloudShared.CloudProvider.Name = version.Program
ccmOptions.KubeCloudShared.ClusterCIDR = cfg.ClusterIPRange.String()
ccmOptions.KubeCloudShared.ClusterCIDR = util2.JoinIPNets(cfg.ClusterIPRanges)
ccmOptions.KubeCloudShared.ConfigureCloudRoutes = false
ccmOptions.Kubeconfig = runtime.KubeConfigCloudController
ccmOptions.NodeStatusUpdateFrequency = metav1.Duration{Duration: 1 * time.Minute}

View File

@ -4,6 +4,7 @@ import (
"context"
"crypto"
"crypto/x509"
"fmt"
"io/ioutil"
"net"
"net/http"
@ -153,8 +154,16 @@ func servingKubeletCert(server *config.Control, keyFile string, auth nodePassBoo
}
ips := []net.IP{net.ParseIP("127.0.0.1")}
if nodeIP := req.Header.Get(version.Program + "-Node-IP"); nodeIP != "" {
ips = append(ips, net.ParseIP(nodeIP))
for _, v := range strings.Split(nodeIP, ",") {
ip := net.ParseIP(v)
if ip == nil {
sendError(fmt.Errorf("invalid IP address %s", ip), resp)
return
}
ips = append(ips, ip)
}
}
cert, err := certutil.NewSignedCert(certutil.Config{

View File

@ -435,8 +435,8 @@ func setNoProxyEnv(config *config.Control) error {
envList = append(envList,
".svc",
"."+config.ClusterDomain,
config.ClusterIPRange.String(),
config.ServiceIPRange.String(),
util.JoinIPNets(config.ClusterIPRanges),
util.JoinIPNets(config.ServiceIPRanges),
)
os.Unsetenv("no_proxy")
return os.Setenv("NO_PROXY", strings.Join(envList, ","))

65
pkg/util/net.go Normal file
View File

@ -0,0 +1,65 @@
package util
import (
"errors"
"net"
"strings"
)
// JoinIPs stringifies and joins a list of IP addresses with commas.
func JoinIPs(elems []net.IP) string {
var strs []string
for _, elem := range elems {
strs = append(strs, elem.String())
}
return strings.Join(strs, ",")
}
// JoinIPNets stringifies and joins a list of IP networks with commas.
func JoinIPNets(elems []*net.IPNet) string {
var strs []string
for _, elem := range elems {
strs = append(strs, elem.String())
}
return strings.Join(strs, ",")
}
// GetFirst4Net returns the first IPv4 network from the list of IP networks.
// If no IPv4 addresses are found, an error is raised.
func GetFirst4Net(elems []*net.IPNet) (*net.IPNet, error) {
for _, elem := range elems {
if elem == nil || elem.IP.To4() == nil {
continue
}
return elem, nil
}
return nil, errors.New("no IPv4 CIDRs found")
}
// GetFirst4 returns the first IPv4 address from the list of IP addresses.
// If no IPv4 addresses are found, an error is raised.
func GetFirst4(elems []net.IP) (net.IP, error) {
for _, elem := range elems {
if elem == nil || elem.To4() == nil {
continue
}
return elem, nil
}
return nil, errors.New("no IPv4 address found")
}
// GetFirst4String returns the first IPv4 address from a list of IP address strings.
// Each string may itself be a comma-separated list; unparseable entries are skipped.
// If no IPv4 addresses are found, an error is raised.
func GetFirst4String(elems []string) (string, error) {
	for _, elem := range elems {
		for _, v := range strings.Split(elem, ",") {
			// Invalid tokens parse to nil and IPv6 addresses have To4 == nil;
			// both are skipped, matching the behavior of GetFirst4.
			if ip := net.ParseIP(v); ip != nil && ip.To4() != nil {
				return ip.String(), nil
			}
		}
	}
	return "", errors.New("no IPv4 address found")
}

1
vendor/modules.txt vendored
View File

@ -3053,6 +3053,7 @@ k8s.io/metrics/pkg/client/external_metrics
# k8s.io/mount-utils v0.0.0 => github.com/k3s-io/kubernetes/staging/src/k8s.io/mount-utils v1.21.0-k3s1
k8s.io/mount-utils
# k8s.io/utils v0.0.0-20201110183641-67b214c5f920
## explicit
k8s.io/utils/buffer
k8s.io/utils/clock
k8s.io/utils/exec