package control

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
	"strconv"
	"strings"
	"sync"

	"github.com/k3s-io/k3s/pkg/daemons/config"
	"github.com/k3s-io/k3s/pkg/daemons/control/proxy"
	"github.com/k3s-io/k3s/pkg/generated/clientset/versioned/scheme"
	"github.com/k3s-io/k3s/pkg/nodeconfig"
	"github.com/k3s-io/k3s/pkg/util"
	"github.com/k3s-io/k3s/pkg/version"
	"github.com/pkg/errors"
	"github.com/rancher/remotedialer"
	"github.com/sirupsen/logrus"
	"github.com/yl2chen/cidranger"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/endpoints/handlers/responsewriters"
	"k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/client-go/kubernetes"
)

var defaultDialer = net.Dialer{}

// loggingErrorWriter logs tunnel server errors at debug level before writing
// the status code and error message back to the client.
func loggingErrorWriter(rw http.ResponseWriter, req *http.Request, code int, err error) {
	logrus.Debugf("Tunnel server error: %d %v", code, err)
	rw.WriteHeader(code)
	rw.Write([]byte(err.Error()))
}

// setupTunnel creates the TunnelServer, registers its watch function to be
// started once the cluster controllers come up, and returns it as an
// http.Handler for the caller to mount on the server's router.
func setupTunnel(ctx context.Context, cfg *config.Control) (http.Handler, error) {
	tunnel := &TunnelServer{
		cidrs:  cidranger.NewPCTrieRanger(),
		config: cfg,
		server: remotedialer.New(authorizer, loggingErrorWriter),
		egress: map[string]bool{},
	}
	cfg.Runtime.ClusterControllerStarts["tunnel-server"] = tunnel.watch
	return tunnel, nil
}
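
// An illustrative sketch of how the returned handler might be mounted; the
// route path shown is an assumption, as the real route wiring lives elsewhere
// in the package:
//
//	tunnel, err := setupTunnel(ctx, cfg)
//	if err != nil {
//		return err
//	}
//	router.Path("/v1-" + version.Program + "/connect").Handler(tunnel)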

// authorizer only accepts tunnel connections from clients that have
// authenticated as a node; the node name is used as the remotedialer
// client key.
func authorizer(req *http.Request) (clientKey string, authed bool, err error) {
	user, ok := request.UserFrom(req.Context())
	if !ok {
		return "", false, nil
	}

	if strings.HasPrefix(user.GetName(), "system:node:") {
		return strings.TrimPrefix(user.GetName(), "system:node:"), true, nil
	}

	return "", false, nil
}
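
// For example, an agent that authenticated with the identity
// system:node:worker-1 (worker-1 being a placeholder node name) is accepted
// with client key "worker-1"; any other identity is refused without error.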

// explicit interface check
var _ http.Handler = &TunnelServer{}

type TunnelServer struct {
	sync.Mutex
	cidrs  cidranger.Ranger
	client kubernetes.Interface
	config *config.Control
	server *remotedialer.Server
	egress map[string]bool
}

// explicit interface check
var _ cidranger.RangerEntry = &tunnelEntry{}

type tunnelEntry struct {
	kubeletPort string
	nodeName    string
	cidr        net.IPNet
}

func (n *tunnelEntry) Network() net.IPNet {
	return n.cidr
}

// IsReservedPort reports whether this is a port that can always be accessed
// via the tunnel server, at the loopback address. Other addresses and ports
// are only accessible via the tunnel on newer agents, when used by a pod.
func (n *tunnelEntry) IsReservedPort(port string) bool {
	return n.kubeletPort != "" && (port == n.kubeletPort || port == config.StreamServerPort)
}
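
// The reserved ports are the kubelet port recorded from the node's status
// (10250 by default) and config.StreamServerPort, whose concrete value is
// defined in the config package.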

// ServeHTTP handles either CONNECT requests, or websocket requests to the remotedialer server
func (t *TunnelServer) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	logrus.Debugf("Tunnel server handling %s %s request for %s from %s", req.Proto, req.Method, req.URL, req.RemoteAddr)
	if req.Method == http.MethodConnect {
		t.serveConnect(resp, req)
	} else {
		t.server.ServeHTTP(resp, req)
	}
}

// watch waits for the runtime core to become available,
// and registers OnChange handlers to observe changes to Nodes (and Pods if necessary).
func (t *TunnelServer) watch(ctx context.Context) {
	logrus.Infof("Tunnel server egress proxy mode: %s", t.config.EgressSelectorMode)

	if t.config.EgressSelectorMode == config.EgressSelectorModeDisabled {
		return
	}

	t.config.Runtime.Core.Core().V1().Node().OnChange(ctx, version.Program+"-tunnel-server", t.onChangeNode)
	switch t.config.EgressSelectorMode {
	case config.EgressSelectorModeCluster, config.EgressSelectorModePod:
		t.config.Runtime.Core.Core().V1().Pod().OnChange(ctx, version.Program+"-tunnel-server", t.onChangePod)
	}
}
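
// Only the "cluster" and "pod" modes track pod addresses, so only they
// register the Pod watch; in every enabled mode, node addresses are tracked
// so that traffic to nodes can be routed over the tunnel.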

// onChangeNode updates the node address mappings by observing changes to nodes.
func (t *TunnelServer) onChangeNode(nodeName string, node *v1.Node) (*v1.Node, error) {
	if node != nil {
		t.Lock()
		defer t.Unlock()
		// Record whether the node has the cluster egress label; the two-value
		// map index stores the label's presence as a bool.
		_, t.egress[nodeName] = node.Labels[nodeconfig.ClusterEgressLabel]
		// Add all node IP addresses
		for _, addr := range node.Status.Addresses {
			if addr.Type == v1.NodeInternalIP || addr.Type == v1.NodeExternalIP {
				if n, err := util.IPStringToIPNet(addr.Address); err == nil {
					if node.DeletionTimestamp != nil {
						logrus.Debugf("Tunnel server egress proxy removing Node %s IP %v", nodeName, n)
						t.cidrs.Remove(*n)
					} else {
						logrus.Debugf("Tunnel server egress proxy updating Node %s IP %v", nodeName, n)
						kubeletPort := strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)
						t.cidrs.Insert(&tunnelEntry{cidr: *n, nodeName: nodeName, kubeletPort: kubeletPort})
					}
				}
			}
		}
	}
	return node, nil
}
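
// The kubelet port captured here is what IsReservedPort later consults, so
// that traffic to the kubelet's own port on a node IP is always eligible for
// the tunnel regardless of the node's egress label.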

// onChangePod updates the pod address mappings by observing changes to pods.
func (t *TunnelServer) onChangePod(podName string, pod *v1.Pod) (*v1.Pod, error) {
	if pod != nil {
		t.Lock()
		defer t.Unlock()
		// Add all pod IPs, unless the pod uses host network
		if !pod.Spec.HostNetwork {
			nodeName := pod.Spec.NodeName
			for _, ip := range pod.Status.PodIPs {
				if cidr, err := util.IPStringToIPNet(ip.IP); err == nil {
					if pod.DeletionTimestamp != nil {
						logrus.Debugf("Tunnel server egress proxy removing Node %s Pod IP %v", nodeName, cidr)
						t.cidrs.Remove(*cidr)
					} else {
						logrus.Debugf("Tunnel server egress proxy updating Node %s Pod IP %s", nodeName, cidr)
						t.cidrs.Insert(&tunnelEntry{cidr: *cidr, nodeName: nodeName})
					}
				}
			}
		}
	}
	return pod, nil
}
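
// Host-network pods are skipped because they share their node's IP, which is
// already mapped by onChangeNode with the kubelet port attached.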

// serveConnect attempts to handle the HTTP CONNECT request by dialing
// a connection, either locally or via the remotedialer tunnel.
func (t *TunnelServer) serveConnect(resp http.ResponseWriter, req *http.Request) {
	bconn, err := t.dialBackend(req.Context(), req.Host)
	if err != nil {
		responsewriters.ErrorNegotiated(
			apierrors.NewServiceUnavailable(err.Error()),
			scheme.Codecs.WithoutConversion(), schema.GroupVersion{}, resp, req,
		)
		return
	}

	hijacker, ok := resp.(http.Hijacker)
	if !ok {
		responsewriters.ErrorNegotiated(
			apierrors.NewInternalError(errors.New("hijacking not supported")),
			scheme.Codecs.WithoutConversion(), schema.GroupVersion{}, resp, req,
		)
		return
	}
	resp.WriteHeader(http.StatusOK)

	rconn, bufrw, err := hijacker.Hijack()
	if err != nil {
		responsewriters.ErrorNegotiated(
			apierrors.NewInternalError(err),
			scheme.Codecs.WithoutConversion(), schema.GroupVersion{}, resp, req,
		)
		return
	}

	proxy.Proxy(newConnReadWriteCloser(rconn, bufrw), bconn)
}
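
// An illustrative CONNECT exchange against this handler, assuming conn is an
// already-authenticated TLS connection to the server (hypothetical client
// code; in practice the apiserver's egress selector performs these dials):
//
//	fmt.Fprintf(conn, "CONNECT 10.42.0.5:8080 HTTP/1.1\r\nHost: 10.42.0.5:8080\r\n\r\n")
//	// After the "HTTP/1.1 200 OK" status line arrives, conn is a raw byte
//	// pipe to 10.42.0.5:8080, proxied via the destination node's tunnel.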

// dialBackend determines where to route the connection request to, and returns
// a dialed connection if possible. Note that in the case of a remotedialer
// tunnel connection, the agent may return an error if the agent's authorizer
// denies the connection, or if there is some other error in actually dialing
// the requested endpoint.
func (t *TunnelServer) dialBackend(ctx context.Context, addr string) (net.Conn, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return nil, err
	}
	loopback := t.config.Loopback(true)

	var nodeName string
	var toKubelet, useTunnel bool
	if ip := net.ParseIP(host); ip != nil {
		// Destination is an IP address, which could be either a pod, or node by IP.
		// We can only use the tunnel for egress to pods if the agent supports it.
		if nets, err := t.cidrs.ContainingNetworks(ip); err == nil && len(nets) > 0 {
			if n, ok := nets[0].(*tunnelEntry); ok {
				nodeName = n.nodeName
				if n.IsReservedPort(port) {
					toKubelet = true
					useTunnel = true
				} else {
					useTunnel = t.egress[nodeName]
				}
			} else {
				logrus.Debugf("Tunnel server egress proxy CIDR lookup returned unknown type for address %s", ip)
			}
		}
	} else {
		// Destination is a node by name, it is safe to use the tunnel.
		nodeName = host
		toKubelet = true
		useTunnel = true
	}

	// Always dial kubelet via the loopback address.
	if toKubelet {
		addr = fmt.Sprintf("%s:%s", loopback, port)
	}

	// If connecting to something hosted by the local node, don't tunnel
	if nodeName == t.config.ServerNodeName {
		useTunnel = false
	}

	if useTunnel {
		// Dialer(nodeName) returns a dial function that calls getDialer internally, which does the same locked session search
		// as HasSession(nodeName). Rather than checking twice, just attempt the dial and handle the error if no session is found.
		dialContext := t.server.Dialer(nodeName)
		if conn, err := dialContext(ctx, "tcp", addr); err != nil {
			logrus.Debugf("Tunnel server egress proxy dial error: %v", err)
			if toKubelet && strings.HasPrefix(err.Error(), "failed to find Session for client") {
				// Don't have a session and we're trying to remote dial the kubelet via loopback, reject the connection.
				return conn, err
			}
			// Any other error is ignored; fall back to dialing directly.
		} else {
			// Have a session and it is safe to use for this destination, do so.
			logrus.Debugf("Tunnel server egress proxy dialing %s via Session to %s", addr, nodeName)
			return conn, err
		}
	}

	// Don't have a session, the agent doesn't support tunneling to this destination, or
	// the destination is local; fall back to direct connection.
	logrus.Debugf("Tunnel server egress proxy dialing %s directly", addr)
	return defaultDialer.DialContext(ctx, "tcp", addr)
}
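
// Routing summary (restating the logic above, not adding behavior):
//
//	destination      reserved port   node egress label   result
//	node by name     n/a             n/a                 tunnel to loopback
//	IP in a CIDR     yes             any                 tunnel to loopback
//	IP in a CIDR     no              present             tunnel to original address
//	IP in a CIDR     no              absent              direct dial
//
// Traffic for the local server node always dials directly. A missing or
// failed tunnel session also falls back to a direct dial, except when the
// target was rewritten to loopback for the kubelet, since falling back there
// would dial the local server itself rather than the remote node.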

// connReadWriteCloser bundles a net.Conn and a wrapping bufio.ReadWriter together into a type that
// meets the ReadWriteCloser interface. The http.Hijacker interface returns such a pair, and reads
// need to go through the buffered reader (because the http handler may have already read from the
// underlying connection), but writes and closes need to hit the connection directly.
type connReadWriteCloser struct {
	conn net.Conn
	once sync.Once
	rw   *bufio.ReadWriter
}

// explicit interface check
var _ io.ReadWriteCloser = &connReadWriteCloser{}

func newConnReadWriteCloser(conn net.Conn, rw *bufio.ReadWriter) *connReadWriteCloser {
	return &connReadWriteCloser{conn: conn, rw: rw}
}

func (crw *connReadWriteCloser) Read(p []byte) (n int, err error) {
	return crw.rw.Read(p)
}

func (crw *connReadWriteCloser) Write(b []byte) (n int, err error) {
	return crw.conn.Write(b)
}

func (crw *connReadWriteCloser) Close() (err error) {
	// sync.Once guards against a double-close if both ends of the proxy tear down.
	crw.once.Do(func() { err = crw.conn.Close() })
	return
}