mirror of
https://github.com/k3s-io/k3s.git
synced 2024-06-07 19:41:36 +00:00
f152f656a0
* Replace k3s cloud provider wrangler controller with core node informer Upstream k8s has exposed an interface for cloud providers to access the cloud controller manager's node cache and shared informer since Kubernetes 1.9. This is used by all the other in-tree cloud providers; we should use it too instead of running a dedicated wrangler controller. Doing so also appears to fix an intermittent issue with the uninitialized taint not getting cleared on nodes in CI. Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
96 lines
2.9 KiB
Go
96 lines
2.9 KiB
Go
package cloudprovider
|
|
|
|
import (
|
|
"context"
|
|
"fmt"
|
|
|
|
"github.com/pkg/errors"
|
|
"github.com/rancher/k3s/pkg/version"
|
|
"github.com/sirupsen/logrus"
|
|
corev1 "k8s.io/api/core/v1"
|
|
"k8s.io/apimachinery/pkg/types"
|
|
cloudprovider "k8s.io/cloud-provider"
|
|
)
|
|
|
|
// Label names used by the k3s cloud provider to read node addresses off of
// node labels, prefixed with the program name (e.g. "k3s.io/internal-ip").
var (
	// InternalIPLabel carries the node's internal IP address.
	InternalIPLabel = version.Program + ".io/internal-ip"
	// ExternalIPLabel carries the node's external IP address.
	ExternalIPLabel = version.Program + ".io/external-ip"
	// HostnameLabel carries the node's hostname.
	HostnameLabel = version.Program + ".io/hostname"
)
|
|
|
|
// AddSSHKeyToAllInstances implements cloudprovider.Instances.
// k3s does not manage SSH keys on nodes, so this always reports NotImplemented.
func (k *k3s) AddSSHKeyToAllInstances(ctx context.Context, user string, keyData []byte) error {
	return cloudprovider.NotImplemented
}
|
|
|
|
// CurrentNodeName implements cloudprovider.Instances.
// k3s uses the hostname directly as the node name; no translation is performed.
func (k *k3s) CurrentNodeName(ctx context.Context, hostname string) (types.NodeName, error) {
	return types.NodeName(hostname), nil
}
|
|
|
|
// InstanceExistsByProviderID implements cloudprovider.Instances.
// It unconditionally reports true: k3s has no external cloud API to consult,
// so nodes are never considered missing by provider ID.
func (k *k3s) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) {
	return true, nil
}
|
|
|
|
func (k *k3s) InstanceID(ctx context.Context, nodeName types.NodeName) (string, error) {
|
|
if k.nodeInformerHasSynced == nil || !k.nodeInformerHasSynced() {
|
|
return "", errors.New("Node informer has not synced yet")
|
|
}
|
|
|
|
_, err := k.nodeInformer.Lister().Get(string(nodeName))
|
|
if err != nil {
|
|
return "", fmt.Errorf("Failed to find node %s: %v", nodeName, err)
|
|
}
|
|
return string(nodeName), nil
|
|
}
|
|
|
|
// InstanceShutdownByProviderID implements cloudprovider.Instances.
// Shutdown detection is not supported; note that this returns true alongside
// NotImplemented, so callers must check the error before trusting the bool.
func (k *k3s) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	return true, cloudprovider.NotImplemented
}
|
|
|
|
func (k *k3s) InstanceType(ctx context.Context, name types.NodeName) (string, error) {
|
|
_, err := k.InstanceID(ctx, name)
|
|
if err != nil {
|
|
return "", err
|
|
}
|
|
return version.Program, nil
|
|
}
|
|
|
|
// InstanceTypeByProviderID implements cloudprovider.Instances.
// Lookup by provider ID is not supported; use InstanceType with a node name.
func (k *k3s) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) {
	return "", cloudprovider.NotImplemented
}
|
|
|
|
func (k *k3s) NodeAddresses(ctx context.Context, name types.NodeName) ([]corev1.NodeAddress, error) {
|
|
addresses := []corev1.NodeAddress{}
|
|
if k.nodeInformerHasSynced == nil || !k.nodeInformerHasSynced() {
|
|
return nil, errors.New("Node informer has not synced yet")
|
|
}
|
|
|
|
node, err := k.nodeInformer.Lister().Get(string(name))
|
|
if err != nil {
|
|
return nil, fmt.Errorf("Failed to find node %s: %v", name, err)
|
|
}
|
|
// check internal address
|
|
if node.Labels[InternalIPLabel] != "" {
|
|
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeInternalIP, Address: node.Labels[InternalIPLabel]})
|
|
} else {
|
|
logrus.Infof("Couldn't find node internal ip label on node %s", name)
|
|
}
|
|
|
|
// check external address
|
|
if node.Labels[ExternalIPLabel] != "" {
|
|
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeExternalIP, Address: node.Labels[ExternalIPLabel]})
|
|
}
|
|
|
|
// check hostname
|
|
if node.Labels[HostnameLabel] != "" {
|
|
addresses = append(addresses, corev1.NodeAddress{Type: corev1.NodeHostName, Address: node.Labels[HostnameLabel]})
|
|
} else {
|
|
logrus.Infof("Couldn't find node hostname label on node %s", name)
|
|
}
|
|
|
|
return addresses, nil
|
|
}
|
|
|
|
// NodeAddressesByProviderID implements cloudprovider.Instances.
// Lookup by provider ID is not supported; use NodeAddresses with a node name.
func (k *k3s) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]corev1.NodeAddress, error) {
	return nil, cloudprovider.NotImplemented
}
|