Add FilterCN function to prevent SAN Stuffing

Wire up a node watch to collect addresses of server nodes, to prevent adding unauthorized SANs to the dynamiclistener cert.

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
parent 8c38d1169d
commit aa76942d0f
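Background for the change: dynamiclistener adds names presented by connecting TLS clients (via SNI) to the serving certificate's Subject Alternative Names, so without a filter any client that can reach the supervisor port can "stuff" arbitrary names into the cert. The FilterCN hook added here restricts additions to configured SANs and addresses of actual server nodes. A minimal, self-contained sketch of that filtering semantics (illustrative names only, not the k3s code itself):

package main

import "fmt"

// filterCN keeps only names that were configured up front (e.g. via
// --tls-san) or learned from control-plane/etcd node addresses.
func filterCN(allowed map[string]bool, cns ...string) []string {
	filtered := make([]string, 0, len(cns))
	for _, cn := range cns {
		if allowed[cn] {
			filtered = append(filtered, cn)
		}
	}
	return filtered
}

func main() {
	allowed := map[string]bool{"kubernetes": true, "10.0.0.5": true}
	// An attacker-supplied SNI name is dropped; known names pass through.
	fmt.Println(filterCN(allowed, "kubernetes", "evil.example.com", "10.0.0.5"))
	// Output: [kubernetes 10.0.0.5]
}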
@@ -8,6 +8,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/k3s-io/k3s/pkg/util"
 	"github.com/k3s-io/k3s/pkg/version"
 	"github.com/rancher/wrangler/pkg/condition"
 	coreclient "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
@@ -484,12 +485,12 @@ func (k *k3s) newDaemonSet(svc *core.Service) (*apps.DaemonSet, error) {
 				},
 				Tolerations: []core.Toleration{
 					{
-						Key:      "node-role.kubernetes.io/master",
+						Key:      util.MasterRoleLabelKey,
 						Operator: "Exists",
 						Effect:   "NoSchedule",
 					},
 					{
-						Key:      "node-role.kubernetes.io/control-plane",
+						Key:      util.ControlPlaneRoleLabelKey,
 						Operator: "Exists",
 						Effect:   "NoSchedule",
 					},
pkg/cluster/address_controller.go (new file, 61 lines)

@@ -0,0 +1,61 @@
+package cluster
+
+import (
+	"context"
+
+	"github.com/k3s-io/k3s/pkg/util"
+	controllerv1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
+	"github.com/sirupsen/logrus"
+	v1 "k8s.io/api/core/v1"
+)
+
+func registerAddressHandlers(ctx context.Context, c *Cluster) {
+	nodes := c.config.Runtime.Core.Core().V1().Node()
+	a := &addressesHandler{
+		nodeController: nodes,
+		allowed:        map[string]bool{},
+	}
+
+	for _, cn := range c.config.SANs {
+		a.allowed[cn] = true
+	}
+
+	logrus.Infof("Starting dynamiclistener CN filter node controller")
+	nodes.OnChange(ctx, "server-cn-filter", a.sync)
+	c.cnFilterFunc = a.filterCN
+}
+
+type addressesHandler struct {
+	nodeController controllerv1.NodeController
+	allowed        map[string]bool
+}
+
+// filterCN filters a list of potential server CNs (hostnames or IPs), removing any which do not correspond to
+// valid cluster servers (control-plane or etcd), or an address explicitly added via the tls-san option.
+func (a *addressesHandler) filterCN(cns ...string) []string {
+	if !a.nodeController.Informer().HasSynced() {
+		return cns
+	}
+
+	filteredCNs := make([]string, 0, len(cns))
+	for _, cn := range cns {
+		if a.allowed[cn] {
+			filteredCNs = append(filteredCNs, cn)
+		} else {
+			logrus.Debugf("CN filter controller rejecting certificate CN: %s", cn)
+		}
+	}
+	return filteredCNs
+}
+
+// sync updates the allowed address list to include addresses for the node
+func (a *addressesHandler) sync(key string, node *v1.Node) (*v1.Node, error) {
+	if node != nil {
+		if node.Labels[util.ControlPlaneRoleLabelKey] != "" || node.Labels[util.ETCDRoleLabelKey] != "" {
+			for _, address := range node.Status.Addresses {
+				a.allowed[address.String()] = true
+			}
+		}
+	}
+	return node, nil
+}
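A reviewer's note on the sync handler above: v1.NodeAddress carries a protobuf-generated String() method that renders a debug form (&NodeAddress{Type:...,Address:...,}) rather than the bare value, so keying the allowed map on address.String() likely never matches a hostname or IP later presented to filterCN. If that reading is correct, the intended keying would use the Address field; a hedged sketch of the fix, under that assumption:

-				a.allowed[address.String()] = true
+				a.allowed[address.Address] = true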
@@ -24,6 +24,7 @@ type Cluster struct {
 	storageStarted  bool
 	saveBootstrap   bool
 	shouldBootstrap bool
+	cnFilterFunc    func(...string) []string
 }
 
 // Start creates the dynamic tls listener, http request handler,
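The filter is stored as a struct field rather than wired in directly because the node controller cannot start until the cluster datastore is up, while newListener runs earlier in startup; filterCN therefore late-binds through cnFilterFunc and fails open (passes all CNs) until registerAddressHandlers assigns it.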
@@ -51,11 +51,15 @@ func (c *Cluster) newListener(ctx context.Context) (net.Listener, http.Handler, error) {
 	if err != nil {
 		return nil, nil, err
 	}
+	c.config.SANs = append(c.config.SANs, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc."+c.config.ClusterDomain)
+	c.config.Runtime.ClusterControllerStarts["server-cn-filter"] = func(ctx context.Context) {
+		registerAddressHandlers(ctx, c)
+	}
 	storage := tlsStorage(ctx, c.config.DataDir, c.config.Runtime)
 	return wrapHandler(dynamiclistener.NewListener(tcp, storage, cert, key, dynamiclistener.Config{
 		ExpirationDaysCheck: config.CertificateRenewDays,
 		Organization:        []string{version.Program},
-		SANs:                append(c.config.SANs, "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc."+c.config.ClusterDomain),
+		SANs:                c.config.SANs,
 		CN:                  version.Program,
 		TLSConfig: &tls.Config{
 			ClientAuth: tls.RequestClientCert,
@@ -63,6 +67,7 @@ func (c *Cluster) newListener(ctx context.Context) (net.Listener, http.Handler, error) {
 			CipherSuites: c.config.TLSCipherSuites,
 			NextProtos:   []string{"h2", "http/1.1"},
 		},
+		FilterCN: c.filterCN,
 		RegenerateCerts: func() bool {
 			const regenerateDynamicListenerFile = "dynamic-cert-regenerate"
 			dynamicListenerRegenFilePath := filepath.Join(c.config.DataDir, "tls", regenerateDynamicListenerFile)
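For orientation: in dynamiclistener, the FilterCN hook is consulted when the listener is about to extend its certificate with additional SANs, typically because a client connected with an SNI server name the certificate does not yet cover; returning a trimmed list is what keeps unauthorized names from triggering a cert regeneration.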
@@ -75,6 +80,13 @@ func (c *Cluster) newListener(ctx context.Context) (net.Listener, http.Handler, error) {
 	}))
 }
 
+func (c *Cluster) filterCN(cn ...string) []string {
+	if c.cnFilterFunc != nil {
+		return c.cnFilterFunc(cn...)
+	}
+	return cn
+}
+
 // initClusterAndHTTPS sets up the dynamic tls listener, request router,
 // and cluster database. Once the database is up, it starts the supervisor http server.
 func (c *Cluster) initClusterAndHTTPS(ctx context.Context) error {
@@ -73,10 +73,6 @@ const (
 	maxBackupRetention     = 5
 	maxConcurrentSnapshots = 1
 	compressedExtension    = ".zip"
-
-	MasterLabel       = "node-role.kubernetes.io/master"
-	ControlPlaneLabel = "node-role.kubernetes.io/control-plane"
-	EtcdRoleLabel     = "node-role.kubernetes.io/etcd"
 )
 
 var (
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"strings"
 
+	"github.com/k3s-io/k3s/pkg/util"
 	"github.com/k3s-io/k3s/pkg/version"
 	controllerv1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
 	"github.com/sirupsen/logrus"
@@ -40,7 +41,7 @@ func (e *etcdMemberHandler) sync(key string, node *v1.Node) (*v1.Node, error) {
 		return nil, nil
 	}
 
-	if _, ok := node.Labels[EtcdRoleLabel]; !ok {
+	if _, ok := node.Labels[util.ETCDRoleLabelKey]; !ok {
 		logrus.Debugf("Node %s was not labeled etcd node, skipping sync", key)
 		return node, nil
 	}
@@ -98,7 +99,7 @@ func (e *etcdMemberHandler) sync(key string, node *v1.Node) (*v1.Node, error) {
 }
 
 func (e *etcdMemberHandler) onRemove(key string, node *v1.Node) (*v1.Node, error) {
-	if _, ok := node.Labels[EtcdRoleLabel]; !ok {
+	if _, ok := node.Labels[util.ETCDRoleLabelKey]; !ok {
 		logrus.Debugf("Node %s was not labeled etcd node, skipping etcd member removal", key)
 		return node, nil
 	}
@@ -5,6 +5,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/k3s-io/k3s/pkg/util"
 	controllerv1 "github.com/rancher/wrangler/pkg/generated/controllers/core/v1"
 	"github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
@@ -51,7 +52,7 @@ func (m *metadataHandler) handleSelf(node *v1.Node) (*v1.Node, error) {
 	if m.etcd.config.DisableETCD {
 		if node.Annotations[NodeNameAnnotation] == "" &&
 			node.Annotations[NodeAddressAnnotation] == "" &&
-			node.Labels[EtcdRoleLabel] == "" {
+			node.Labels[util.ETCDRoleLabelKey] == "" {
 			return node, nil
 		}
 
@@ -65,14 +66,14 @@ func (m *metadataHandler) handleSelf(node *v1.Node) (*v1.Node, error) {
 
 		delete(node.Annotations, NodeNameAnnotation)
 		delete(node.Annotations, NodeAddressAnnotation)
-		delete(node.Labels, EtcdRoleLabel)
+		delete(node.Labels, util.ETCDRoleLabelKey)
 
 		return m.nodeController.Update(node)
 	}
 
 	if node.Annotations[NodeNameAnnotation] == m.etcd.name &&
 		node.Annotations[NodeAddressAnnotation] == m.etcd.address &&
-		node.Labels[EtcdRoleLabel] == "true" {
+		node.Labels[util.ETCDRoleLabelKey] == "true" {
 		return node, nil
 	}
 
@@ -86,7 +87,7 @@ func (m *metadataHandler) handleSelf(node *v1.Node) (*v1.Node, error) {
 
 	node.Annotations[NodeNameAnnotation] = m.etcd.name
 	node.Annotations[NodeAddressAnnotation] = m.etcd.address
-	node.Labels[EtcdRoleLabel] = "true"
+	node.Labels[util.ETCDRoleLabelKey] = "true"
 
 	return m.nodeController.Update(node)
 }
@@ -29,7 +29,6 @@ const (
 	secretsProgressEvent       string = "SecretsProgress"
 	secretsUpdateCompleteEvent string = "SecretsUpdateComplete"
 	secretsUpdateErrorEvent    string = "SecretsUpdateError"
-	controlPlaneRoleLabelKey   string = "node-role.kubernetes.io/control-plane"
 )
 
 type handler struct {
@@ -186,7 +185,7 @@ func (h *handler) validateReencryptStage(node *corev1.Node, annotation string) (bool, error) {
 	if err != nil {
 		return false, err
 	}
-	labelSelector := labels.Set{controlPlaneRoleLabelKey: "true"}.String()
+	labelSelector := labels.Set{util.ControlPlaneRoleLabelKey: "true"}.String()
 	nodes, err := h.nodes.List(metav1.ListOptions{LabelSelector: labelSelector})
 	if err != nil {
 		return false, err
@@ -16,6 +16,7 @@ import (
 	"github.com/k3s-io/k3s/pkg/cluster"
 	"github.com/k3s-io/k3s/pkg/daemons/config"
 	"github.com/k3s-io/k3s/pkg/secretsencrypt"
+	"github.com/k3s-io/k3s/pkg/util"
 	"github.com/rancher/wrangler/pkg/generated/controllers/core"
 	"github.com/sirupsen/logrus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -304,7 +305,7 @@ func getEncryptionHashAnnotation(core core.Interface) (string, string, error) {
 	if err != nil {
 		return "", "", err
 	}
-	if _, ok := node.Labels[ControlPlaneRoleLabelKey]; !ok {
+	if _, ok := node.Labels[util.ControlPlaneRoleLabelKey]; !ok {
 		return "", "", fmt.Errorf("cannot manage secrets encryption on non control-plane node %s", nodeName)
 	}
 	if ann, ok := node.Annotations[secretsencrypt.EncryptionHashAnnotation]; ok {
@@ -323,7 +324,7 @@ func verifyEncryptionHashAnnotation(runtime *config.ControlRuntime, core core.Interface) error {
 	var firstHash string
 	var firstNodeName string
 	first := true
-	labelSelector := labels.Set{ControlPlaneRoleLabelKey: "true"}.String()
+	labelSelector := labels.Set{util.ControlPlaneRoleLabelKey: "true"}.String()
 	nodes, err := core.V1().Node().List(metav1.ListOptions{LabelSelector: labelSelector})
 	if err != nil {
 		return err
@@ -39,12 +39,6 @@ import (
 	"k8s.io/client-go/tools/clientcmd"
 )
 
-const (
-	MasterRoleLabelKey       = "node-role.kubernetes.io/master"
-	ControlPlaneRoleLabelKey = "node-role.kubernetes.io/control-plane"
-	ETCDRoleLabelKey         = "node-role.kubernetes.io/etcd"
-)
-
 func ResolveDataDir(dataDir string) (string, error) {
 	dataDir, err := datadir.Resolve(dataDir)
 	return filepath.Join(dataDir, "server"), err
@@ -580,10 +574,10 @@ func setNodeLabelsAndAnnotations(ctx context.Context, nodes v1.NodeClient, config *Config) error {
 	if node.Labels == nil {
 		node.Labels = make(map[string]string)
 	}
-	v, ok := node.Labels[ControlPlaneRoleLabelKey]
+	v, ok := node.Labels[util.ControlPlaneRoleLabelKey]
 	if !ok || v != "true" {
-		node.Labels[ControlPlaneRoleLabelKey] = "true"
-		node.Labels[MasterRoleLabelKey] = "true"
+		node.Labels[util.ControlPlaneRoleLabelKey] = "true"
+		node.Labels[util.MasterRoleLabelKey] = "true"
 	}
 
 	if config.ControlConfig.EncryptSecrets {
pkg/util/labels.go (new file, 7 lines)

@@ -0,0 +1,7 @@
+package util
+
+const (
+	MasterRoleLabelKey       = "node-role.kubernetes.io/master"
+	ControlPlaneRoleLabelKey = "node-role.kubernetes.io/control-plane"
+	ETCDRoleLabelKey         = "node-role.kubernetes.io/etcd"
+)
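The new pkg/util/labels.go consolidates the role-label keys that the hunks above remove from their per-package declarations (in the etcd, secretsencrypt, and server packages), so every consumer now imports the same constants.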