mirror of https://github.com/k3s-io/k3s.git (synced 2024-06-07 19:41:36 +00:00)

Commit e8381db778:
* Update Kubernetes to v1.21.0
* Update to golang v1.16.2
* Update dependent modules to track with upstream
* Switch to upstream flannel
* Track changes to upstream cloud-controller-manager and FeatureGates

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
361 lines · 12 KiB · Go
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node
import (
	"context"
	"encoding/json"
	"fmt"
	"net"
	"os"
	"strings"
	"time"

	"k8s.io/klog/v2"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/kubernetes/pkg/features"
	utilnet "k8s.io/utils/net"
)

const (
	// NodeUnreachablePodReason is the reason on a pod when its state cannot be confirmed as kubelet is unresponsive
	// on the node it is (was) running.
	NodeUnreachablePodReason = "NodeLost"
	// NodeUnreachablePodMessage is the message on a pod when its state cannot be confirmed as kubelet is unresponsive
	// on the node it is (was) running.
	NodeUnreachablePodMessage = "Node %v which was running pod %v is unresponsive"
)
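
// Example (illustrative sketch; the node and pod names below are placeholders):
// NodeUnreachablePodMessage is a format string, filled in with the node name and
// the pod name, e.g.
//
//	msg := fmt.Sprintf(NodeUnreachablePodMessage, "node-1", "default/nginx")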

// GetHostname returns the OS's hostname if 'hostnameOverride' is empty; otherwise, it returns 'hostnameOverride'.
func GetHostname(hostnameOverride string) (string, error) {
	hostName := hostnameOverride
	if len(hostName) == 0 {
		nodeName, err := os.Hostname()
		if err != nil {
			return "", fmt.Errorf("couldn't determine hostname: %v", err)
		}
		hostName = nodeName
	}

	// Trim whitespace first to avoid getting an empty hostname.
	// On Linux, the hostname is read directly from /proc/sys/kernel/hostname.
	hostName = strings.TrimSpace(hostName)
	if len(hostName) == 0 {
		return "", fmt.Errorf("empty hostname is invalid")
	}
	return strings.ToLower(hostName), nil
}
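
// Example usage (illustrative sketch, not part of the package API surface):
// resolve the node name, falling back to the OS hostname when no override is set.
//
//	hostname, err := GetHostname("") // "" falls back to os.Hostname(), trimmed and lowercased
//	if err != nil {
//		klog.Fatalf("unable to determine hostname: %v", err)
//	}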

// NoMatchError is a typed implementation of the error interface. It indicates a failure to get a matching Node.
type NoMatchError struct {
	addresses []v1.NodeAddress
}

// Error is the implementation of the conventional interface for
// representing an error condition, with the nil value representing no error.
func (e *NoMatchError) Error() string {
	return fmt.Sprintf("no preferred addresses found; known addresses: %v", e.addresses)
}
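
// Example (illustrative sketch; preferredTypes is a hypothetical []v1.NodeAddressType):
// callers that need to distinguish "no preferred address found" from other failures
// can type-assert the returned error.
//
//	if _, err := GetPreferredNodeAddress(node, preferredTypes); err != nil {
//		if _, noMatch := err.(*NoMatchError); noMatch {
//			// none of the preferred address types exist on the node
//		}
//	}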

// GetPreferredNodeAddress returns the address of the provided node, using the provided preference order.
// If none of the preferred address types are found, an error is returned.
func GetPreferredNodeAddress(node *v1.Node, preferredAddressTypes []v1.NodeAddressType) (string, error) {
	for _, addressType := range preferredAddressTypes {
		for _, address := range node.Status.Addresses {
			if address.Type == addressType {
				return address.Address, nil
			}
		}
	}
	return "", &NoMatchError{addresses: node.Status.Addresses}
}
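
// Example (illustrative sketch): a typical ordered preference list; the first
// address whose type matches wins. This particular ordering is an assumption
// for illustration, not something this function mandates.
//
//	addr, err := GetPreferredNodeAddress(node, []v1.NodeAddressType{
//		v1.NodeInternalIP,
//		v1.NodeExternalIP,
//		v1.NodeHostName,
//	})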

// GetNodeHostIPs returns the provided node's IP(s); either a single "primary IP" for the
// node in a single-stack cluster, or a dual-stack pair of IPs in a dual-stack cluster
// (for nodes that actually have dual-stack IPs). Among other things, the IPs returned
// from this function are used as the `.status.PodIPs` values for host-network pods on the
// node, and the first IP is used as the `.status.HostIP` for all pods on the node.
func GetNodeHostIPs(node *v1.Node) ([]net.IP, error) {
	// Re-sort the addresses with InternalIPs first and then ExternalIPs
	allIPs := make([]net.IP, 0, len(node.Status.Addresses))
	for _, addr := range node.Status.Addresses {
		if addr.Type == v1.NodeInternalIP {
			ip := net.ParseIP(addr.Address)
			if ip != nil {
				allIPs = append(allIPs, ip)
			}
		}
	}
	for _, addr := range node.Status.Addresses {
		if addr.Type == v1.NodeExternalIP {
			ip := net.ParseIP(addr.Address)
			if ip != nil {
				allIPs = append(allIPs, ip)
			}
		}
	}
	if len(allIPs) == 0 {
		return nil, fmt.Errorf("host IP unknown; known addresses: %v", node.Status.Addresses)
	}

	nodeIPs := []net.IP{allIPs[0]}
	if utilfeature.DefaultFeatureGate.Enabled(features.IPv6DualStack) {
		for _, ip := range allIPs {
			if utilnet.IsIPv6(ip) != utilnet.IsIPv6(nodeIPs[0]) {
				nodeIPs = append(nodeIPs, ip)
				break
			}
		}
	}

	return nodeIPs, nil
}
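
// Example (illustrative sketch): with the IPv6DualStack feature gate enabled,
// the returned slice holds at most one IP per family, InternalIPs preferred
// over ExternalIPs, and the first entry is the primary IP.
//
//	ips, err := GetNodeHostIPs(node)
//	if err != nil {
//		return err
//	}
//	primary := ips[0] // always present on success
//	// ips[1], if present, is an IP of the other family (dual-stack only)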

// GetNodeHostIP returns the provided node's "primary" IP; see GetNodeHostIPs for more details
func GetNodeHostIP(node *v1.Node) (net.IP, error) {
	ips, err := GetNodeHostIPs(node)
	if err != nil {
		return nil, err
	}
	// GetNodeHostIPs always returns at least one IP if it didn't return an error
	return ips[0], nil
}

// GetNodeIP returns an IP (as with GetNodeHostIP) for the node with the provided name.
// If required, it will wait for the node to be created.
func GetNodeIP(client clientset.Interface, name string) net.IP {
	var nodeIP net.IP
	backoff := wait.Backoff{
		Steps:    6,
		Duration: 1 * time.Second,
		Factor:   2.0,
		Jitter:   0.2,
	}

	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		node, err := client.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			klog.Errorf("Failed to retrieve node info: %v", err)
			return false, nil
		}
		nodeIP, err = GetNodeHostIP(node)
		if err != nil {
			klog.Errorf("Failed to retrieve node IP: %v", err)
			return false, err
		}
		return true, nil
	})
	if err == nil {
		klog.Infof("Successfully retrieved node IP: %v", nodeIP)
	}
	return nodeIP
}
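
// Example (illustrative sketch; "my-node" is a placeholder name): GetNodeIP
// retries with exponential backoff (6 steps starting at 1s, factor 2.0), so it
// tolerates the node object not existing yet; it returns nil if all attempts fail.
//
//	nodeIP := GetNodeIP(client, "my-node")
//	if nodeIP == nil {
//		// node not found, or it has no parseable InternalIP/ExternalIP
//	}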

type nodeForConditionPatch struct {
	Status nodeStatusForPatch `json:"status"`
}

type nodeStatusForPatch struct {
	Conditions []v1.NodeCondition `json:"conditions"`
}

// SetNodeCondition updates specific node condition with patch operation.
func SetNodeCondition(c clientset.Interface, node types.NodeName, condition v1.NodeCondition) error {
	generatePatch := func(condition v1.NodeCondition) ([]byte, error) {
		patch := nodeForConditionPatch{
			Status: nodeStatusForPatch{
				Conditions: []v1.NodeCondition{
					condition,
				},
			},
		}
		patchBytes, err := json.Marshal(&patch)
		if err != nil {
			return nil, err
		}
		return patchBytes, nil
	}
	condition.LastHeartbeatTime = metav1.NewTime(time.Now())
	patch, err := generatePatch(condition)
	if err != nil {
		return nil
	}
	_, err = c.CoreV1().Nodes().PatchStatus(context.TODO(), string(node), patch)
	return err
}
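
// Example (illustrative sketch; the node name and condition values are placeholders):
// LastHeartbeatTime is stamped inside SetNodeCondition, so callers only fill in the rest.
//
//	err := SetNodeCondition(client, types.NodeName("my-node"), v1.NodeCondition{
//		Type:               v1.NodeNetworkUnavailable,
//		Status:             v1.ConditionFalse,
//		Reason:             "RouteCreated",
//		Message:            "Route created on node",
//		LastTransitionTime: metav1.Now(),
//	})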

type nodeForCIDRMergePatch struct {
	Spec nodeSpecForMergePatch `json:"spec"`
}

type nodeSpecForMergePatch struct {
	PodCIDR  string   `json:"podCIDR"`
	PodCIDRs []string `json:"podCIDRs,omitempty"`
}

// PatchNodeCIDR patches the specified node's CIDR to the given value.
func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) error {
	patch := nodeForCIDRMergePatch{
		Spec: nodeSpecForMergePatch{
			PodCIDR: cidr,
		},
	}
	patchBytes, err := json.Marshal(&patch)
	if err != nil {
		return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
	}

	if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
		return fmt.Errorf("failed to patch node CIDR: %v", err)
	}
	return nil
}

// PatchNodeCIDRs patches the specified node's spec.podCIDR (to cidrs[0]) and spec.podCIDRs (to cidrs).
func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error {
	// set the pod cidrs list and set the old pod cidr field
	patch := nodeForCIDRMergePatch{
		Spec: nodeSpecForMergePatch{
			PodCIDR:  cidrs[0],
			PodCIDRs: cidrs,
		},
	}

	patchBytes, err := json.Marshal(&patch)
	if err != nil {
		return fmt.Errorf("failed to json.Marshal CIDR: %v", err)
	}
	klog.V(4).Infof("cidrs patch bytes are:%s", string(patchBytes))
	if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
		return fmt.Errorf("failed to patch node CIDR: %v", err)
	}
	return nil
}
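
// Example (illustrative sketch; the CIDR values are placeholders): dual-stack
// assignment writes both the legacy singular field (spec.podCIDR, from cidrs[0])
// and the plural list (spec.podCIDRs) in a single strategic merge patch.
//
//	err := PatchNodeCIDRs(client, types.NodeName("my-node"),
//		[]string{"10.42.0.0/24", "2001:db8:42::/56"})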

// PatchNodeStatus patches node status.
func PatchNodeStatus(c v1core.CoreV1Interface, nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) (*v1.Node, []byte, error) {
	patchBytes, err := preparePatchBytesforNodeStatus(nodeName, oldNode, newNode)
	if err != nil {
		return nil, nil, err
	}

	updatedNode, err := c.Nodes().Patch(context.TODO(), string(nodeName), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
	if err != nil {
		return nil, nil, fmt.Errorf("failed to patch status %q for node %q: %v", patchBytes, nodeName, err)
	}
	return updatedNode, patchBytes, nil
}
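
// Example (illustrative sketch; updatedConditions is a hypothetical slice): the
// usual flow is to deep-copy the node, mutate only the copy's Status, and let
// PatchNodeStatus compute and send the strategic merge patch against the
// "status" subresource.
//
//	newNode := oldNode.DeepCopy()
//	newNode.Status.Conditions = updatedConditions
//	patched, patchBytes, err := PatchNodeStatus(client.CoreV1(), types.NodeName(oldNode.Name), oldNode, newNode)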

func preparePatchBytesforNodeStatus(nodeName types.NodeName, oldNode *v1.Node, newNode *v1.Node) ([]byte, error) {
	oldData, err := json.Marshal(oldNode)
	if err != nil {
		return nil, fmt.Errorf("failed to Marshal oldData for node %q: %v", nodeName, err)
	}

	// NodeStatus.Addresses is incorrectly annotated as patchStrategy=merge, which
	// will cause strategicpatch.CreateTwoWayMergePatch to create an incorrect patch
	// if it changed.
	manuallyPatchAddresses := (len(oldNode.Status.Addresses) > 0) && !equality.Semantic.DeepEqual(oldNode.Status.Addresses, newNode.Status.Addresses)

	// Reset spec to make sure only patch for Status or ObjectMeta is generated.
	// Note that we don't reset ObjectMeta here, because:
	// 1. This aligns with Nodes().UpdateStatus().
	// 2. Some component does use this to update node annotations.
	diffNode := newNode.DeepCopy()
	diffNode.Spec = oldNode.Spec
	if manuallyPatchAddresses {
		diffNode.Status.Addresses = oldNode.Status.Addresses
	}
	newData, err := json.Marshal(diffNode)
	if err != nil {
		return nil, fmt.Errorf("failed to Marshal newData for node %q: %v", nodeName, err)
	}

	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	if err != nil {
		return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for node %q: %v", nodeName, err)
	}
	if manuallyPatchAddresses {
		patchBytes, err = fixupPatchForNodeStatusAddresses(patchBytes, newNode.Status.Addresses)
		if err != nil {
			return nil, fmt.Errorf("failed to fix up NodeAddresses in patch for node %q: %v", nodeName, err)
		}
	}

	return patchBytes, nil
}

// fixupPatchForNodeStatusAddresses adds a replace-strategy patch for Status.Addresses to
// the existing patch
func fixupPatchForNodeStatusAddresses(patchBytes []byte, addresses []v1.NodeAddress) ([]byte, error) {
	// Given patchBytes='{"status": {"conditions": [ ... ], "phase": ...}}' and
	// addresses=[{"type": "InternalIP", "address": "10.0.0.1"}], we need to generate:
	//
	//   {
	//     "status": {
	//       "conditions": [ ... ],
	//       "phase": ...,
	//       "addresses": [
	//         {
	//           "type": "InternalIP",
	//           "address": "10.0.0.1"
	//         },
	//         {
	//           "$patch": "replace"
	//         }
	//       ]
	//     }
	//   }

	var patchMap map[string]interface{}
	if err := json.Unmarshal(patchBytes, &patchMap); err != nil {
		return nil, err
	}

	addrBytes, err := json.Marshal(addresses)
	if err != nil {
		return nil, err
	}
	var addrArray []interface{}
	if err := json.Unmarshal(addrBytes, &addrArray); err != nil {
		return nil, err
	}
	addrArray = append(addrArray, map[string]interface{}{"$patch": "replace"})

	status := patchMap["status"]
	if status == nil {
		status = map[string]interface{}{}
		patchMap["status"] = status
	}
	statusMap, ok := status.(map[string]interface{})
	if !ok {
		return nil, fmt.Errorf("unexpected data in patch")
	}
	statusMap["addresses"] = addrArray

	return json.Marshal(patchMap)
}