Initial update of netpol and utils from upstream

Signed-off-by: Brad Davidson <brad.davidson@rancher.com>
This commit is contained in:
Brad Davidson 2021-02-01 11:03:43 -08:00 committed by Brad Davidson
parent 740b654d47
commit 29483d0651
7 changed files with 2245 additions and 1478 deletions

View File

@ -0,0 +1,61 @@
package netpol
import (
"reflect"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
)
// newNamespaceEventHandler builds the cache.ResourceEventHandler wired to the
// namespace informer: adds, label updates, and deletes all funnel into the
// corresponding handleNamespace* methods.
func (npc *NetworkPolicyController) newNamespaceEventHandler() cache.ResourceEventHandler {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			npc.handleNamespaceAdd(obj.(*api.Namespace))
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			npc.handleNamespaceUpdate(oldObj.(*api.Namespace), newObj.(*api.Namespace))
		},
		DeleteFunc: func(obj interface{}) {
			if ns, isNamespace := obj.(*api.Namespace); isNamespace {
				npc.handleNamespaceDelete(ns)
				return
			}
			// The informer hands us a tombstone when the object's final state
			// was not observed; unwrap it and retry. A tombstone wrapping a
			// non-namespace object is silently ignored, matching the original
			// switch behavior.
			tombstone, isTombstone := obj.(cache.DeletedFinalStateUnknown)
			if !isTombstone {
				glog.Errorf("unexpected object type: %v", obj)
				return
			}
			if ns, ok := tombstone.Obj.(*api.Namespace); ok {
				npc.handleNamespaceDelete(ns)
			}
		},
	}
}
// handleNamespaceAdd requests a full policy sync when a labeled namespace is
// added. Namespaces without labels are skipped: they cannot be matched by any
// namespaceSelector, so they cannot affect policy evaluation.
func (npc *NetworkPolicyController) handleNamespaceAdd(obj *api.Namespace) {
	if obj.Labels == nil {
		return
	}
	// Fixed log message: this is the add handler, but it previously logged
	// "Received update for namespace", which was misleading in the logs.
	glog.V(2).Infof("Received namespace: %s add event", obj.Name)
	npc.RequestFullSync()
}
// handleNamespaceUpdate requests a full policy sync whenever a namespace's
// labels change. Updates that leave the labels untouched are ignored, since
// policy selection depends only on labels.
func (npc *NetworkPolicyController) handleNamespaceUpdate(oldObj, newObj *api.Namespace) {
	labelsUnchanged := reflect.DeepEqual(oldObj.Labels, newObj.Labels)
	if labelsUnchanged {
		return
	}
	glog.V(2).Infof("Received update for namespace: %s", newObj.Name)
	npc.RequestFullSync()
}
// handleNamespaceDelete requests a full policy sync when a labeled namespace
// is removed; unlabeled namespaces were never selectable and are skipped.
func (npc *NetworkPolicyController) handleNamespaceDelete(ns *api.Namespace) {
	if ns.Labels == nil {
		return
	}
	glog.V(2).Infof("Received namespace: %s delete event", ns.Name)
	npc.RequestFullSync()
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,545 @@
package netpol
import (
"context"
"net"
"strings"
"testing"
"time"
netv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/client-go/tools/cache"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
"github.com/cloudnativelabs/kube-router/pkg/options"
)
// newFakeInformersFromClient creates the different informers used in the uneventful network policy controller.
// NOTE the return order: factory, pod informer, NAMESPACE informer, NETWORK POLICY informer —
// callers destructure as (informerFactory, podInformer, nsInformer, npInformer).
func newFakeInformersFromClient(kubeClient clientset.Interface) (informers.SharedInformerFactory, cache.SharedIndexInformer, cache.SharedIndexInformer, cache.SharedIndexInformer) {
	// Resync period of 0 disables periodic resyncs for these tests.
	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
	podInformer := informerFactory.Core().V1().Pods().Informer()
	npInformer := informerFactory.Networking().V1().NetworkPolicies().Informer()
	nsInformer := informerFactory.Core().V1().Namespaces().Informer()
	return informerFactory, podInformer, nsInformer, npInformer
}
// tNamespaceMeta captures the minimal namespace fields (name and labels)
// needed to build fake v1.Namespace objects for these tests.
type tNamespaceMeta struct {
	name   string
	labels labels.Set
}
// Add resources to Informer Store object to simulate updating the Informer.
// Injecting directly into the store bypasses the API server / watch machinery,
// which keeps the tests synchronous. A store failure aborts the whole test.
func tAddToInformerStore(t *testing.T, informer cache.SharedIndexInformer, obj interface{}) {
	err := informer.GetStore().Add(obj)
	if err != nil {
		t.Fatalf("error injecting object to Informer Store: %v", err)
	}
}
// tNetpol describes a network policy fixture; createFakeNetpol turns it into
// a netv1.NetworkPolicy injected into the informer store.
type tNetpol struct {
	name        string
	namespace   string
	podSelector metav1.LabelSelector
	ingress     []netv1.NetworkPolicyIngressRule
	egress      []netv1.NetworkPolicyEgressRule
}
// createFakeNetpol is a helper to create the network policy from the tNetpol
// struct and inject it into the informer store. The PolicyTypes list is
// derived from which rule sets (ingress/egress) are non-empty.
func (ns *tNetpol) createFakeNetpol(t *testing.T, informer cache.SharedIndexInformer) {
	polTypes := make([]netv1.PolicyType, 0)
	if len(ns.ingress) > 0 {
		polTypes = append(polTypes, netv1.PolicyTypeIngress)
	}
	if len(ns.egress) > 0 {
		polTypes = append(polTypes, netv1.PolicyTypeEgress)
	}
	netpol := &netv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: ns.name, Namespace: ns.namespace},
		Spec: netv1.NetworkPolicySpec{
			PodSelector: ns.podSelector,
			PolicyTypes: polTypes,
			Ingress:     ns.ingress,
			Egress:      ns.egress,
		},
	}
	tAddToInformerStore(t, informer, netpol)
}
// findNetpolMatch returns a pointer to a copy of the entry in netpols whose
// namespace and name match this tNetpol, or nil when no such policy exists.
func (ns *tNetpol) findNetpolMatch(netpols *[]networkPolicyInfo) *networkPolicyInfo {
	for i := range *netpols {
		// Take a copy so the returned pointer does not alias the slice,
		// matching the original loop-variable semantics.
		candidate := (*netpols)[i]
		if candidate.namespace == ns.namespace && candidate.name == ns.name {
			return &candidate
		}
	}
	return nil
}
// tPodNamespaceMap is a helper to create sets of namespace,pod names.
type tPodNamespaceMap map[string]map[string]bool

// addPod records pod's name under its namespace, lazily creating the
// per-namespace set on first use.
func (t tPodNamespaceMap) addPod(pod podInfo) {
	nsSet, present := t[pod.namespace]
	if !present {
		nsSet = make(map[string]bool)
		t[pod.namespace] = nsSet
	}
	nsSet[pod.name] = true
}
// delPod removes pod's name from its namespace set and drops the namespace
// entry entirely once its set becomes empty.
func (t tPodNamespaceMap) delPod(pod podInfo) {
	names := t[pod.namespace]
	delete(names, pod.name)
	if len(names) == 0 {
		delete(t, pod.namespace)
	}
}
// addNSPodInfo records podname under namespace ns, lazily creating the
// per-namespace set on first use.
func (t tPodNamespaceMap) addNSPodInfo(ns, podname string) {
	pods, present := t[ns]
	if !present {
		pods = make(map[string]bool)
		t[ns] = pods
	}
	pods[podname] = true
}
// copy returns a deep copy of the map (both outer and inner maps are fresh).
func (t tPodNamespaceMap) copy() tPodNamespaceMap {
	dup := make(tPodNamespaceMap)
	for ns, pods := range t {
		for name := range pods {
			dup.addNSPodInfo(ns, name)
		}
	}
	return dup
}
// toStrSlice flattens the set into "namespace:pod" strings. Map iteration
// order is random, so the result order is unspecified.
func (t tPodNamespaceMap) toStrSlice() []string {
	var entries []string
	for ns, pods := range t {
		for pod := range pods {
			entries = append(entries, ns+":"+pod)
		}
	}
	return entries
}
// tNewPodNamespaceMapFromTC creates a new tPodNamespaceMap from the info of
// the testcase; each map value is a comma-separated list of pod names.
func tNewPodNamespaceMapFromTC(target map[string]string) tPodNamespaceMap {
	result := make(tPodNamespaceMap)
	for ns, csv := range target {
		for _, podName := range strings.Split(csv, ",") {
			result.addNSPodInfo(ns, podName)
		}
	}
	return result
}
// tCreateFakePods creates the Pods and Namespaces that will be affected by the network policies.
// It returns a map like map[Namespace]map[PodName]bool describing every pod created.
func tCreateFakePods(t *testing.T, podInformer cache.SharedIndexInformer, nsInformer cache.SharedIndexInformer) tPodNamespaceMap {
	podNamespaceMap := make(tPodNamespaceMap)
	// Pod fixtures: the ip field holds the last two octets; the full address
	// is assembled as "1.1."+ip below, so all pods land in 1.1.0.0/16.
	pods := []podInfo{
		{name: "Aa", labels: labels.Set{"app": "a"}, namespace: "nsA", ip: "1.1"},
		{name: "Aaa", labels: labels.Set{"app": "a", "component": "a"}, namespace: "nsA", ip: "1.2"},
		{name: "Aab", labels: labels.Set{"app": "a", "component": "b"}, namespace: "nsA", ip: "1.3"},
		{name: "Aac", labels: labels.Set{"app": "a", "component": "c"}, namespace: "nsA", ip: "1.4"},
		{name: "Ba", labels: labels.Set{"app": "a"}, namespace: "nsB", ip: "2.1"},
		{name: "Baa", labels: labels.Set{"app": "a", "component": "a"}, namespace: "nsB", ip: "2.2"},
		{name: "Bab", labels: labels.Set{"app": "a", "component": "b"}, namespace: "nsB", ip: "2.3"},
		{name: "Ca", labels: labels.Set{"app": "a"}, namespace: "nsC", ip: "3.1"},
	}
	// nsD intentionally has no pods, so tests can cover an empty namespace.
	namespaces := []tNamespaceMeta{
		{name: "nsA", labels: labels.Set{"name": "a", "team": "a"}},
		{name: "nsB", labels: labels.Set{"name": "b", "team": "a"}},
		{name: "nsC", labels: labels.Set{"name": "c"}},
		{name: "nsD", labels: labels.Set{"name": "d"}},
	}
	// Guard against accidental duplicate IPs in the fixtures above, since the
	// controller keys pods by IP.
	ipsUsed := make(map[string]bool)
	for _, pod := range pods {
		podNamespaceMap.addPod(pod)
		ipaddr := "1.1." + pod.ip
		if ipsUsed[ipaddr] {
			// BUGFIX: the second value was pod.name twice; the format string
			// expects the pod name followed by its namespace.
			t.Fatalf("there is another pod with the same Ip address %s as this pod %s namespace %s",
				ipaddr, pod.name, pod.namespace)
		}
		ipsUsed[ipaddr] = true
		tAddToInformerStore(t, podInformer,
			&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: pod.name, Labels: pod.labels, Namespace: pod.namespace},
				Status: v1.PodStatus{PodIP: ipaddr}})
	}
	for _, ns := range namespaces {
		tAddToInformerStore(t, nsInformer, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns.name, Labels: ns.labels}})
	}
	return podNamespaceMap
}
// newFakeNode is a helper function for creating Nodes for testing.
// The node advertises a single ExternalIP address and token CPU/memory
// capacity so it looks schedulable to the controller under test.
func newFakeNode(name string, addr string) *v1.Node {
	return &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceCPU):    resource.MustParse("1"),
				v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
			},
			Addresses: []v1.NodeAddress{{Type: v1.NodeExternalIP, Address: addr}},
		},
	}
}
// newUneventfulNetworkPolicyController returns a new NetworkPolicyController
// object without any event handlers registered, backed directly by the given
// informers' indexers. It never returns a non-nil error; the error value
// exists only to mirror the real constructor's signature.
func newUneventfulNetworkPolicyController(podInformer cache.SharedIndexInformer,
	npInformer cache.SharedIndexInformer, nsInformer cache.SharedIndexInformer) (*NetworkPolicyController, error) {
	npc := &NetworkPolicyController{
		syncPeriod:   time.Hour,
		nodeHostName: "node",
		nodeIP:       net.IPv4(10, 10, 10, 10),
		podLister:    podInformer.GetIndexer(),
		nsLister:     nsInformer.GetIndexer(),
		npLister:     npInformer.GetIndexer(),
	}
	return npc, nil
}
// tNetpolTestCase helper struct to define the inputs to the test case (netpols) and
// the expected selected targets (targetPods, inSourcePods for ingress targets, and outDestPods
// for egress targets) as maps with key being the namespace and a csv of pod names
type tNetpolTestCase struct {
	name         string           // descriptive name, used as the t.Run subtest name
	netpol       tNetpol          // the network policy fixture under test
	targetPods   tPodNamespaceMap // pods the policy's podSelector must select
	inSourcePods tPodNamespaceMap // expected ingress source pods
	outDestPods  tPodNamespaceMap // expected egress destination pods
}
// tGetNotTargetedPods finds the set of pods that were selected by the netpol
// selectors but should NOT have been, returned as "namespace:pod" strings.
func tGetNotTargetedPods(podsGot []podInfo, wanted tPodNamespaceMap) []string {
	unexpected := make(tPodNamespaceMap)
	for _, pod := range podsGot {
		if wanted[pod.namespace][pod.name] {
			continue
		}
		unexpected.addPod(pod)
	}
	return unexpected.toStrSlice()
}
// tGetTargetPodsMissing returns the set of pods that should have been targeted
// by the netpol selectors but were missing, as "namespace:pod" strings.
func tGetTargetPodsMissing(podsGot []podInfo, wanted tPodNamespaceMap) []string {
	// Start from everything wanted, then strike off each pod actually seen.
	remaining := wanted.copy()
	for _, pod := range podsGot {
		if !wanted[pod.namespace][pod.name] {
			continue
		}
		remaining.delPod(pod)
	}
	return remaining.toStrSlice()
}
// tListOfPodsFromTargets flattens a target map's values into a slice of
// podInfo (nil when the map is empty; iteration order is unspecified).
func tListOfPodsFromTargets(target map[string]podInfo) []podInfo {
	var result []podInfo
	for _, pod := range target {
		result = append(result, pod)
	}
	return result
}
// testForMissingOrUnwanted reports a test error if any wanted pod is absent
// from got, or if got contains any pod not present in wanted.
func testForMissingOrUnwanted(t *testing.T, targetMsg string, got []podInfo, wanted tPodNamespaceMap) {
	missing := tGetTargetPodsMissing(got, wanted)
	if len(missing) > 0 {
		t.Errorf("Some Pods were not selected %s: %s", targetMsg, strings.Join(missing, ", "))
	}
	unwanted := tGetNotTargetedPods(got, wanted)
	if len(unwanted) > 0 {
		t.Errorf("Some Pods NOT expected were selected on %s: %s", targetMsg, strings.Join(unwanted, ", "))
	}
}
// newMinimalKubeRouterConfig builds a KubeRouterConfig from defaults,
// overriding only the fields for which a non-zero argument is supplied.
func newMinimalKubeRouterConfig(clusterIPCIDR string, nodePortRange string, hostNameOverride string, externalIPs []string) *options.KubeRouterConfig {
	cfg := options.NewKubeRouterConfig()
	if len(clusterIPCIDR) > 0 {
		cfg.ClusterIPCIDR = clusterIPCIDR
	}
	if len(nodePortRange) > 0 {
		cfg.NodePortRange = nodePortRange
	}
	if len(hostNameOverride) > 0 {
		cfg.HostnameOverride = hostNameOverride
	}
	if externalIPs != nil {
		cfg.ExternalIPCIDRs = externalIPs
	}
	return cfg
}
// tNetPolConfigTestCase describes one NewNetworkPolicyController startup
// scenario: the config to pass in and whether/which error is expected.
type tNetPolConfigTestCase struct {
	name        string                    // subtest name
	config      *options.KubeRouterConfig // controller config under test
	expectError bool                      // true when construction must fail
	errorText   string                    // exact expected error message when expectError is true
}
// TestNewNetworkPolicySelectors exercises buildNetworkPoliciesInfo: each table
// entry injects a fake network policy, and the selected target /
// ingress-source / egress-destination pods are compared against expectations.
// Pod/namespace fixtures come from tCreateFakePods (nsA..nsD).
func TestNewNetworkPolicySelectors(t *testing.T) {
	testCases := []tNetpolTestCase{
		{
			name:       "Non-Existent Namespace",
			netpol:     tNetpol{name: "nsXX", podSelector: metav1.LabelSelector{}, namespace: "nsXX"},
			targetPods: nil,
		},
		{
			// nsD exists but has no pods.
			name:       "Empty Namespace",
			netpol:     tNetpol{name: "nsD", podSelector: metav1.LabelSelector{}, namespace: "nsD"},
			targetPods: nil,
		},
		{
			name:       "All pods in nsA",
			netpol:     tNetpol{name: "nsA", podSelector: metav1.LabelSelector{}, namespace: "nsA"},
			targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
		},
		{
			name:       "All pods in nsB",
			netpol:     tNetpol{name: "nsB", podSelector: metav1.LabelSelector{}, namespace: "nsB"},
			targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsB": "Ba,Baa,Bab"}),
		},
		{
			name:       "All pods in nsC",
			netpol:     tNetpol{name: "nsC", podSelector: metav1.LabelSelector{}, namespace: "nsC"},
			targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsC": "Ca"}),
		},
		{
			name: "All pods app=a in nsA using matchExpressions",
			netpol: tNetpol{
				name:      "nsA-app-a-matchExpression",
				namespace: "nsA",
				podSelector: metav1.LabelSelector{
					MatchExpressions: []metav1.LabelSelectorRequirement{{
						Key:      "app",
						Operator: "In",
						Values:   []string{"a"},
					}}}},
			targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
		},
		{
			name: "All pods app=a in nsA using matchLabels",
			netpol: tNetpol{name: "nsA-app-a-matchLabels", namespace: "nsA",
				podSelector: metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "a"}}},
			targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
		},
		{
			// Ingress peer selected by namespaceSelector only: every pod in nsB.
			name: "All pods app=a in nsA using matchLabels ingress allow from any pod in nsB",
			netpol: tNetpol{name: "nsA-app-a-matchLabels-2", namespace: "nsA",
				podSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "a"}},
				ingress:     []netv1.NetworkPolicyIngressRule{{From: []netv1.NetworkPolicyPeer{{NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": "b"}}}}}},
			},
			targetPods:   tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
			inSourcePods: tNewPodNamespaceMapFromTC(map[string]string{"nsB": "Ba,Baa,Bab"}),
		},
		{
			// namespaceSelector AND podSelector in one peer: intersection semantics.
			name: "All pods app=a in nsA using matchLabels ingress allow from pod in nsB with component = b",
			netpol: tNetpol{name: "nsA-app-a-matchExpression-2", namespace: "nsA",
				podSelector: metav1.LabelSelector{MatchLabels: map[string]string{"app": "a"}},
				ingress: []netv1.NetworkPolicyIngressRule{{From: []netv1.NetworkPolicyPeer{
					{
						NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": "b"}},
						PodSelector: &metav1.LabelSelector{
							MatchExpressions: []metav1.LabelSelectorRequirement{{
								Key:      "component",
								Operator: "In",
								Values:   []string{"b"},
							}}},
					},
				}}}},
			targetPods:   tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aa,Aaa,Aab,Aac"}),
			inSourcePods: tNewPodNamespaceMapFromTC(map[string]string{"nsB": "Bab"}),
		},
		{
			// Multiple matchExpressions are ANDed together.
			name: "All pods app=a,component=b or c in nsA",
			netpol: tNetpol{name: "nsA-app-a-matchExpression-3", namespace: "nsA",
				podSelector: metav1.LabelSelector{
					MatchExpressions: []metav1.LabelSelectorRequirement{
						{
							Key:      "app",
							Operator: "In",
							Values:   []string{"a"},
						},
						{
							Key:      "component",
							Operator: "In",
							Values:   []string{"b", "c"},
						}}},
			},
			targetPods: tNewPodNamespaceMapFromTC(map[string]string{"nsA": "Aab,Aac"}),
		},
	}
	client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*newFakeNode("node", "10.10.10.10")}})
	informerFactory, podInformer, nsInformer, netpolInformer := newFakeInformersFromClient(client)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	informerFactory.Start(ctx.Done())
	// NOTE(review): only the pod informer is waited on; the ns and netpol
	// stores are populated directly below via tAddToInformerStore, so this
	// appears sufficient — confirm if flakes are ever observed here.
	cache.WaitForCacheSync(ctx.Done(), podInformer.HasSynced)
	krNetPol, _ := newUneventfulNetworkPolicyController(podInformer, netpolInformer, nsInformer)
	tCreateFakePods(t, podInformer, nsInformer)
	for _, test := range testCases {
		test.netpol.createFakeNetpol(t, netpolInformer)
	}
	netpols, err := krNetPol.buildNetworkPoliciesInfo()
	if err != nil {
		t.Errorf("Problems building policies")
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			// NOTE(review): findNetpolMatch can return nil; a miss here would
			// panic the subtest on the np.targetPods deref rather than fail
			// cleanly — consider a nil guard.
			np := test.netpol.findNetpolMatch(&netpols)
			testForMissingOrUnwanted(t, "targetPods", tListOfPodsFromTargets(np.targetPods), test.targetPods)
			for _, ingress := range np.ingressRules {
				testForMissingOrUnwanted(t, "ingress srcPods", ingress.srcPods, test.inSourcePods)
			}
			for _, egress := range np.egressRules {
				testForMissingOrUnwanted(t, "egress dstPods", egress.dstPods, test.outDestPods)
			}
		})
	}
}
// TestNetworkPolicyController validates NewNetworkPolicyController's config
// parsing: cluster IP CIDR, node port range, hostname resolution, and external
// IP CIDRs. Each case either succeeds or fails with an exact error message.
func TestNetworkPolicyController(t *testing.T) {
	testCases := []tNetPolConfigTestCase{
		{
			"Default options are successful",
			newMinimalKubeRouterConfig("", "", "node", nil),
			false,
			"",
		},
		{
			"Missing nodename fails appropriately",
			newMinimalKubeRouterConfig("", "", "", nil),
			true,
			"Failed to identify the node by NODE_NAME, hostname or --hostname-override",
		},
		{
			"Test bad cluster CIDR (not properly formatting ip address)",
			newMinimalKubeRouterConfig("10.10.10", "", "node", nil),
			true,
			"failed to get parse --service-cluster-ip-range parameter: invalid CIDR address: 10.10.10",
		},
		{
			"Test bad cluster CIDR (not using an ip address)",
			newMinimalKubeRouterConfig("foo", "", "node", nil),
			true,
			"failed to get parse --service-cluster-ip-range parameter: invalid CIDR address: foo",
		},
		{
			"Test bad cluster CIDR (using an ip address that is not a CIDR)",
			newMinimalKubeRouterConfig("10.10.10.10", "", "node", nil),
			true,
			"failed to get parse --service-cluster-ip-range parameter: invalid CIDR address: 10.10.10.10",
		},
		{
			"Test good cluster CIDR (using single IP with a /32)",
			newMinimalKubeRouterConfig("10.10.10.10/32", "", "node", nil),
			false,
			"",
		},
		{
			"Test good cluster CIDR (using normal range with /24)",
			newMinimalKubeRouterConfig("10.10.10.0/24", "", "node", nil),
			false,
			"",
		},
		{
			"Test bad node port specification (using commas)",
			newMinimalKubeRouterConfig("", "8080,8081", "node", nil),
			true,
			"failed to parse node port range given: '8080,8081' please see specification in help text",
		},
		{
			"Test bad node port specification (not using numbers)",
			newMinimalKubeRouterConfig("", "foo:bar", "node", nil),
			true,
			"failed to parse node port range given: 'foo:bar' please see specification in help text",
		},
		{
			"Test bad node port specification (using anything in addition to range)",
			newMinimalKubeRouterConfig("", "8080,8081-8090", "node", nil),
			true,
			"failed to parse node port range given: '8080,8081-8090' please see specification in help text",
		},
		{
			"Test bad node port specification (using reversed range)",
			newMinimalKubeRouterConfig("", "8090-8080", "node", nil),
			true,
			"port 1 is greater than or equal to port 2 in range given: '8090-8080'",
		},
		{
			"Test bad node port specification (port out of available range)",
			newMinimalKubeRouterConfig("", "132000-132001", "node", nil),
			true,
			"could not parse first port number from range given: '132000-132001'",
		},
		{
			"Test good node port specification (using colon separator)",
			newMinimalKubeRouterConfig("", "8080:8090", "node", nil),
			false,
			"",
		},
		{
			"Test good node port specification (using hyphen separator)",
			newMinimalKubeRouterConfig("", "8080-8090", "node", nil),
			false,
			"",
		},
		{
			"Test bad external IP CIDR (not properly formatting ip address)",
			newMinimalKubeRouterConfig("", "", "node", []string{"199.10.10"}),
			true,
			"failed to get parse --service-external-ip-range parameter: '199.10.10'. Error: invalid CIDR address: 199.10.10",
		},
		{
			"Test bad external IP CIDR (not using an ip address)",
			newMinimalKubeRouterConfig("", "", "node", []string{"foo"}),
			true,
			"failed to get parse --service-external-ip-range parameter: 'foo'. Error: invalid CIDR address: foo",
		},
		{
			"Test bad external IP CIDR (using an ip address that is not a CIDR)",
			newMinimalKubeRouterConfig("", "", "node", []string{"199.10.10.10"}),
			true,
			"failed to get parse --service-external-ip-range parameter: '199.10.10.10'. Error: invalid CIDR address: 199.10.10.10",
		},
		{
			"Test bad external IP CIDR (making sure that it processes all items in the list)",
			newMinimalKubeRouterConfig("", "", "node", []string{"199.10.10.10/32", "199.10.10.11"}),
			true,
			"failed to get parse --service-external-ip-range parameter: '199.10.10.11'. Error: invalid CIDR address: 199.10.10.11",
		},
		{
			"Test good external IP CIDR (using single IP with a /32)",
			newMinimalKubeRouterConfig("", "", "node", []string{"199.10.10.10/32"}),
			false,
			"",
		},
		{
			"Test good external IP CIDR (using normal range with /24)",
			newMinimalKubeRouterConfig("", "", "node", []string{"199.10.10.10/24"}),
			false,
			"",
		},
	}
	client := fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*newFakeNode("node", "10.10.10.10")}})
	_, podInformer, nsInformer, netpolInformer := newFakeInformersFromClient(client)
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			_, err := NewNetworkPolicyController(client, test.config, podInformer, netpolInformer, nsInformer)
			if err == nil && test.expectError {
				t.Error("This config should have failed, but it was successful instead")
			} else if err != nil {
				// Unfortunately without doing a ton of extra refactoring work, we can't remove this reference easily
				// from the controllers start up. Luckily it's one of the last items to be processed in the controller
				// so for now we'll consider that if we hit this error that we essentially didn't hit an error at all
				// TODO: refactor NPC to use an injectable interface for ipset operations
				if !test.expectError && err.Error() != "Ipset utility not found" {
					t.Errorf("This config should have been successful, but it failed instead. Error: %s", err)
				} else if test.expectError && err.Error() != test.errorText {
					t.Errorf("Expected error: '%s' but instead got: '%s'", test.errorText, err)
				}
			}
		})
	}
}
// Ref:
// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/podgc/gc_controller_test.go
// https://github.com/kubernetes/kubernetes/blob/master/pkg/controller/testutil/test_utils.go

380
pkg/agent/netpol/pod.go Normal file
View File

@ -0,0 +1,380 @@
package netpol
import (
"crypto/sha256"
"encoding/base32"
"fmt"
"strings"
"github.com/coreos/go-iptables/iptables"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
)
// newPodEventHandler builds the cache.ResourceEventHandler wired to the pod
// informer. Adds and deletes always trigger a sync request; updates do so only
// when the pod's phase or IP changed, since those are the only pod fields that
// affect network policy enforcement.
func (npc *NetworkPolicyController) newPodEventHandler() cache.ResourceEventHandler {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: npc.OnPodUpdate,
		UpdateFunc: func(oldObj, newObj interface{}) {
			oldPod := oldObj.(*api.Pod)
			newPod := newObj.(*api.Pod)
			// Only phase transitions and IP (re)assignments matter here.
			phaseChanged := newPod.Status.Phase != oldPod.Status.Phase
			ipChanged := newPod.Status.PodIP != oldPod.Status.PodIP
			if phaseChanged || ipChanged {
				npc.OnPodUpdate(newPod)
			}
		},
		DeleteFunc: npc.handlePodDelete,
	}
}
// OnPodUpdate handles updates to pods from the Kubernetes api server and
// requests a full policy sync.
func (npc *NetworkPolicyController) OnPodUpdate(obj interface{}) {
	pod, ok := obj.(*api.Pod)
	if !ok {
		// Be defensive rather than panicking on an unexpected type, matching
		// the handling in handlePodDelete.
		glog.Errorf("unexpected object type: %v", obj)
		return
	}
	glog.V(2).Infof("Received update to pod: %s/%s", pod.Namespace, pod.Name)
	npc.RequestFullSync()
}
// handlePodDelete requests a full policy sync when a pod is removed,
// unwrapping DeletedFinalStateUnknown tombstones delivered by the informer
// when the object's final state was not observed.
func (npc *NetworkPolicyController) handlePodDelete(obj interface{}) {
	pod, isPod := obj.(*api.Pod)
	if !isPod {
		tombstone, isTombstone := obj.(cache.DeletedFinalStateUnknown)
		if !isTombstone {
			glog.Errorf("unexpected object type: %v", obj)
			return
		}
		pod, isPod = tombstone.Obj.(*api.Pod)
		if !isPod {
			glog.Errorf("unexpected object type: %v", obj)
			return
		}
	}
	glog.V(2).Infof("Received pod: %s/%s delete event", pod.Namespace, pod.Name)
	npc.RequestFullSync()
}
// syncPodFirewallChains ensures that every pod on this node targeted by at
// least one ingress or egress network policy has a dedicated "pod firewall"
// chain in the filter table, wired so that (a) traffic to/from the pod is
// diverted into that chain, (b) each applicable policy chain is traversed,
// and (c) traffic not marked 0x10000 (the "permitted" mark set by policy
// chains) is logged via NFLOG and REJECTed. It returns the set of pod
// firewall chain names active for this sync version, so the caller can
// garbage-collect stale chains.
//
// NOTE: rules are Insert-ed at position 1, so within each pod chain the
// last-inserted rule is evaluated first.
func (npc *NetworkPolicyController) syncPodFirewallChains(networkPoliciesInfo []networkPolicyInfo, version string) (map[string]bool, error) {
	activePodFwChains := make(map[string]bool)
	iptablesCmdHandler, err := iptables.New()
	if err != nil {
		glog.Fatalf("Failed to initialize iptables executor: %s", err.Error())
	}
	// dropUnmarkedTrafficRules appends the tail rules of a pod chain: log then
	// REJECT anything not carrying the 0x10000 "permitted" mark, then clear
	// the mark so permitted traffic passes cleanly through later chains.
	// It deliberately assigns to the enclosing err variable.
	dropUnmarkedTrafficRules := func(podName, podNamespace, podFwChainName string) error {
		// add rule to log the packets that will be dropped due to network policy enforcement
		comment := "rule to log dropped traffic POD name:" + podName + " namespace: " + podNamespace
		// limit 10/minute keeps the NFLOG output from flooding under attack/misconfig.
		args := []string{"-m", "comment", "--comment", comment, "-m", "mark", "!", "--mark", "0x10000/0x10000", "-j", "NFLOG", "--nflog-group", "100", "-m", "limit", "--limit", "10/minute", "--limit-burst", "10"}
		err = iptablesCmdHandler.AppendUnique("filter", podFwChainName, args...)
		if err != nil {
			return fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		// add rule to DROP if no applicable network policy permits the traffic
		comment = "rule to REJECT traffic destined for POD name:" + podName + " namespace: " + podNamespace
		args = []string{"-m", "comment", "--comment", comment, "-m", "mark", "!", "--mark", "0x10000/0x10000", "-j", "REJECT"}
		err = iptablesCmdHandler.AppendUnique("filter", podFwChainName, args...)
		if err != nil {
			return fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		// reset mark to let traffic pass through rest of the chains
		args = []string{"-j", "MARK", "--set-mark", "0/0x10000"}
		err = iptablesCmdHandler.AppendUnique("filter", podFwChainName, args...)
		if err != nil {
			return fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		return nil
	}
	// loop through the pods running on the node which to which ingress network policies to be applied
	ingressNetworkPolicyEnabledPods, err := npc.getIngressNetworkPolicyEnabledPods(networkPoliciesInfo, npc.nodeIP.String())
	if err != nil {
		return nil, err
	}
	for _, pod := range *ingressNetworkPolicyEnabledPods {
		// below condition occurs when we get trasient update while removing or adding pod
		// subseqent update will do the correct action
		if len(pod.ip) == 0 || pod.ip == "" {
			continue
		}
		// ensure pod specific firewall chain exist for all the pods that need ingress firewall
		podFwChainName := podFirewallChainName(pod.namespace, pod.name, version)
		// exit status 1 from iptables -N means the chain already exists, which is fine.
		err = iptablesCmdHandler.NewChain("filter", podFwChainName)
		if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		activePodFwChains[podFwChainName] = true
		// add entries in pod firewall to run through required network policies
		for _, policy := range networkPoliciesInfo {
			if _, ok := policy.targetPods[pod.ip]; ok {
				comment := "run through nw policy " + policy.name
				policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)
				args := []string{"-m", "comment", "--comment", comment, "-j", policyChainName}
				exists, err := iptablesCmdHandler.Exists("filter", podFwChainName, args...)
				if err != nil {
					return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
				}
				if !exists {
					err := iptablesCmdHandler.Insert("filter", podFwChainName, 1, args...)
					if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
						return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
					}
				}
			}
		}
		comment := "rule to permit the traffic traffic to pods when source is the pod's local node"
		args := []string{"-m", "comment", "--comment", comment, "-m", "addrtype", "--src-type", "LOCAL", "-d", pod.ip, "-j", "ACCEPT"}
		exists, err := iptablesCmdHandler.Exists("filter", podFwChainName, args...)
		if err != nil {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		if !exists {
			err := iptablesCmdHandler.Insert("filter", podFwChainName, 1, args...)
			if err != nil {
				return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
			}
		}
		// ensure statefull firewall, that permits return traffic for the traffic originated by the pod
		comment = "rule for stateful firewall for pod"
		args = []string{"-m", "comment", "--comment", comment, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}
		exists, err = iptablesCmdHandler.Exists("filter", podFwChainName, args...)
		if err != nil {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		if !exists {
			err := iptablesCmdHandler.Insert("filter", podFwChainName, 1, args...)
			if err != nil {
				return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
			}
		}
		// ensure there is rule in filter table and FORWARD chain to jump to pod specific firewall chain
		// this rule applies to the traffic getting routed (coming for other node pods)
		comment = "rule to jump traffic destined to POD name:" + pod.name + " namespace: " + pod.namespace +
			" to chain " + podFwChainName
		args = []string{"-m", "comment", "--comment", comment, "-d", pod.ip, "-j", podFwChainName}
		exists, err = iptablesCmdHandler.Exists("filter", kubeForwardChainName, args...)
		if err != nil {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		if !exists {
			err := iptablesCmdHandler.Insert("filter", kubeForwardChainName, 1, args...)
			if err != nil {
				return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
			}
		}
		// ensure there is rule in filter table and OUTPUT chain to jump to pod specific firewall chain
		// this rule applies to the traffic from a pod getting routed back to another pod on same node by service proxy
		// NOTE: deliberately reuses the args (and comment) built for the FORWARD rule above.
		exists, err = iptablesCmdHandler.Exists("filter", kubeOutputChainName, args...)
		if err != nil {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		if !exists {
			err := iptablesCmdHandler.Insert("filter", kubeOutputChainName, 1, args...)
			if err != nil {
				return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
			}
		}
		// ensure there is rule in filter table and forward chain to jump to pod specific firewall chain
		// this rule applies to the traffic getting switched (coming for same node pods)
		comment = "rule to jump traffic destined to POD name:" + pod.name + " namespace: " + pod.namespace +
			" to chain " + podFwChainName
		args = []string{"-m", "physdev", "--physdev-is-bridged",
			"-m", "comment", "--comment", comment,
			"-d", pod.ip,
			"-j", podFwChainName}
		exists, err = iptablesCmdHandler.Exists("filter", kubeForwardChainName, args...)
		if err != nil {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		if !exists {
			err = iptablesCmdHandler.Insert("filter", kubeForwardChainName, 1, args...)
			if err != nil {
				return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
			}
		}
		err = dropUnmarkedTrafficRules(pod.name, pod.namespace, podFwChainName)
		if err != nil {
			return nil, err
		}
	}
	// loop through the pods running on the node which egress network policies to be applied
	egressNetworkPolicyEnabledPods, err := npc.getEgressNetworkPolicyEnabledPods(networkPoliciesInfo, npc.nodeIP.String())
	if err != nil {
		return nil, err
	}
	for _, pod := range *egressNetworkPolicyEnabledPods {
		// below condition occurs when we get trasient update while removing or adding pod
		// subseqent update will do the correct action
		if len(pod.ip) == 0 || pod.ip == "" {
			continue
		}
		// ensure pod specific firewall chain exist for all the pods that need egress firewall
		// (may already exist if the pod also has ingress policies — that is fine).
		podFwChainName := podFirewallChainName(pod.namespace, pod.name, version)
		err = iptablesCmdHandler.NewChain("filter", podFwChainName)
		if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		activePodFwChains[podFwChainName] = true
		// add entries in pod firewall to run through required network policies
		for _, policy := range networkPoliciesInfo {
			if _, ok := policy.targetPods[pod.ip]; ok {
				comment := "run through nw policy " + policy.name
				policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)
				args := []string{"-m", "comment", "--comment", comment, "-j", policyChainName}
				exists, err := iptablesCmdHandler.Exists("filter", podFwChainName, args...)
				if err != nil {
					return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
				}
				if !exists {
					err := iptablesCmdHandler.Insert("filter", podFwChainName, 1, args...)
					if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
						return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
					}
				}
			}
		}
		// ensure statefull firewall, that permits return traffic for the traffic originated by the pod
		comment := "rule for stateful firewall for pod"
		args := []string{"-m", "comment", "--comment", comment, "-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED", "-j", "ACCEPT"}
		exists, err := iptablesCmdHandler.Exists("filter", podFwChainName, args...)
		if err != nil {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		if !exists {
			err := iptablesCmdHandler.Insert("filter", podFwChainName, 1, args...)
			if err != nil {
				return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
			}
		}
		// Egress traffic can leave the pod via INPUT (to the host), FORWARD
		// (to other nodes), or OUTPUT; divert all three into the pod chain.
		egressFilterChains := []string{kubeInputChainName, kubeForwardChainName, kubeOutputChainName}
		for _, chain := range egressFilterChains {
			// ensure there is rule in filter table and FORWARD chain to jump to pod specific firewall chain
			// this rule applies to the traffic getting forwarded/routed (traffic from the pod destinted
			// to pod on a different node)
			comment = "rule to jump traffic from POD name:" + pod.name + " namespace: " + pod.namespace +
				" to chain " + podFwChainName
			args = []string{"-m", "comment", "--comment", comment, "-s", pod.ip, "-j", podFwChainName}
			exists, err = iptablesCmdHandler.Exists("filter", chain, args...)
			if err != nil {
				return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
			}
			if !exists {
				err := iptablesCmdHandler.AppendUnique("filter", chain, args...)
				if err != nil {
					return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
				}
			}
		}
		// ensure there is rule in filter table and forward chain to jump to pod specific firewall chain
		// this rule applies to the traffic getting switched (coming for same node pods)
		comment = "rule to jump traffic from POD name:" + pod.name + " namespace: " + pod.namespace +
			" to chain " + podFwChainName
		args = []string{"-m", "physdev", "--physdev-is-bridged",
			"-m", "comment", "--comment", comment,
			"-s", pod.ip,
			"-j", podFwChainName}
		exists, err = iptablesCmdHandler.Exists("filter", kubeForwardChainName, args...)
		if err != nil {
			return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}
		if !exists {
			err = iptablesCmdHandler.Insert("filter", kubeForwardChainName, 1, args...)
			if err != nil {
				return nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
			}
		}
		err = dropUnmarkedTrafficRules(pod.name, pod.namespace, podFwChainName)
		if err != nil {
			return nil, err
		}
	}
	return activePodFwChains, nil
}
// getIngressNetworkPolicyEnabledPods returns, keyed by pod IP, the pods running
// on this node (matched by nodeIP against pod.Status.HostIP) that are selected
// by at least one policy with an ingress component. These are the pods that
// need a per-pod ingress firewall chain.
func (npc *NetworkPolicyController) getIngressNetworkPolicyEnabledPods(networkPoliciesInfo []networkPolicyInfo, nodeIP string) (*map[string]podInfo, error) {
	nodePods := make(map[string]podInfo)

	for _, obj := range npc.podLister.List() {
		pod := obj.(*api.Pod)

		// Only pods scheduled to this node are of interest. Direct string
		// comparison replaces strings.Compare: the operators are clearer and
		// faster for a pure equality test.
		if pod.Status.HostIP != nodeIP {
			continue
		}
		for _, policy := range networkPoliciesInfo {
			if policy.namespace != pod.ObjectMeta.Namespace {
				continue
			}
			_, ok := policy.targetPods[pod.Status.PodIP]
			if ok && (policy.policyType == "both" || policy.policyType == "ingress") {
				glog.V(2).Infof("Found pod name: " + pod.ObjectMeta.Name + " namespace: " + pod.ObjectMeta.Namespace + " for which network policies need to be applied.")
				nodePods[pod.Status.PodIP] = podInfo{ip: pod.Status.PodIP,
					name:      pod.ObjectMeta.Name,
					namespace: pod.ObjectMeta.Namespace,
					labels:    pod.ObjectMeta.Labels}
				// One matching policy is enough; stop scanning the rest.
				break
			}
		}
	}
	return &nodePods, nil
}
// getEgressNetworkPolicyEnabledPods returns, keyed by pod IP, the pods running
// on this node (matched by nodeIP against pod.Status.HostIP) that are selected
// by at least one policy with an egress component. These are the pods that
// need a per-pod egress firewall chain.
func (npc *NetworkPolicyController) getEgressNetworkPolicyEnabledPods(networkPoliciesInfo []networkPolicyInfo, nodeIP string) (*map[string]podInfo, error) {
	nodePods := make(map[string]podInfo)

	for _, obj := range npc.podLister.List() {
		pod := obj.(*api.Pod)

		// Only pods scheduled to this node are of interest. Direct string
		// comparison replaces strings.Compare: the operators are clearer and
		// faster for a pure equality test.
		if pod.Status.HostIP != nodeIP {
			continue
		}
		for _, policy := range networkPoliciesInfo {
			if policy.namespace != pod.ObjectMeta.Namespace {
				continue
			}
			_, ok := policy.targetPods[pod.Status.PodIP]
			if ok && (policy.policyType == "both" || policy.policyType == "egress") {
				glog.V(2).Infof("Found pod name: " + pod.ObjectMeta.Name + " namespace: " + pod.ObjectMeta.Namespace + " for which network policies need to be applied.")
				nodePods[pod.Status.PodIP] = podInfo{ip: pod.Status.PodIP,
					name:      pod.ObjectMeta.Name,
					namespace: pod.ObjectMeta.Namespace,
					labels:    pod.ObjectMeta.Labels}
				// One matching policy is enough; stop scanning the rest.
				break
			}
		}
	}
	return &nodePods, nil
}
// podFirewallChainName returns the deterministic per-pod firewall chain name,
// derived from a hash of the namespace, pod name, and sync version.
func podFirewallChainName(namespace, podName string, version string) string {
	checksum := sha256.Sum256([]byte(namespace + podName + version))
	return kubePodFirewallChainPrefix + base32.StdEncoding.EncodeToString(checksum[:])[:16]
}

817
pkg/agent/netpol/policy.go Normal file
View File

@ -0,0 +1,817 @@
package netpol
import (
"crypto/sha256"
"encoding/base32"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/cloudnativelabs/kube-router/pkg/metrics"
"github.com/cloudnativelabs/kube-router/pkg/utils"
"github.com/coreos/go-iptables/iptables"
"github.com/golang/glog"
api "k8s.io/api/core/v1"
networking "k8s.io/api/networking/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
)
// newNetworkPolicyEventHandler returns the informer callbacks used to react
// to NetworkPolicy add/update/delete events. Adds and updates are handled
// identically: both simply request a full resync.
func (npc *NetworkPolicyController) newNetworkPolicyEventHandler() cache.ResourceEventHandler {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: npc.OnNetworkPolicyUpdate,
		UpdateFunc: func(oldObj, newObj interface{}) {
			npc.OnNetworkPolicyUpdate(newObj)
		},
		DeleteFunc: npc.handleNetworkPolicyDelete,
	}
}
// OnNetworkPolicyUpdate handles updates to network policy from the kubernetes api server
func (npc *NetworkPolicyController) OnNetworkPolicyUpdate(obj interface{}) {
	netpol, ok := obj.(*networking.NetworkPolicy)
	if !ok {
		// A bare type assertion would panic the whole controller if the
		// informer ever delivered an unexpected type; log and ignore instead,
		// matching the delete handler's behavior.
		glog.Errorf("unexpected object type: %v", obj)
		return
	}
	glog.V(2).Infof("Received update for network policy: %s/%s", netpol.Namespace, netpol.Name)
	npc.RequestFullSync()
}
// handleNetworkPolicyDelete requests a full resync when a NetworkPolicy is
// removed, unwrapping informer tombstones when the watch missed the actual
// delete event.
func (npc *NetworkPolicyController) handleNetworkPolicyDelete(obj interface{}) {
	netpol, ok := obj.(*networking.NetworkPolicy)
	if !ok {
		// The object may arrive wrapped in a DeletedFinalStateUnknown
		// tombstone; recover the policy from it if possible.
		tombstone, isTombstone := obj.(cache.DeletedFinalStateUnknown)
		if !isTombstone {
			glog.Errorf("unexpected object type: %v", obj)
			return
		}
		netpol, ok = tombstone.Obj.(*networking.NetworkPolicy)
		if !ok {
			glog.Errorf("unexpected object type: %v", obj)
			return
		}
	}
	glog.V(2).Infof("Received network policy: %s/%s delete event", netpol.Namespace, netpol.Name)
	npc.RequestFullSync()
}
// Configure iptables rules representing each network policy. All pod's matched by
// network policy spec podselector labels are grouped together in one ipset which
// is used for matching destination ip address. Each ingress rule in the network
// policyspec is evaluated to set of matching pods, which are grouped in to a
// ipset used for source ip addr matching.
//
// Returns the set of policy chains and the set of policy ipsets touched during
// this sync, so the caller can garbage-collect stale ones afterwards.
func (npc *NetworkPolicyController) syncNetworkPolicyChains(networkPoliciesInfo []networkPolicyInfo, version string) (map[string]bool, map[string]bool, error) {
	start := time.Now()
	defer func() {
		endTime := time.Since(start)
		metrics.ControllerPolicyChainsSyncTime.Observe(endTime.Seconds())
		glog.V(2).Infof("Syncing network policy chains took %v", endTime)
	}()
	activePolicyChains := make(map[string]bool)
	activePolicyIPSets := make(map[string]bool)

	iptablesCmdHandler, err := iptables.New()
	if err != nil {
		// Propagate the error rather than glog.Fatalf: a sync helper must not
		// terminate the whole process; the controller can retry on the next
		// sync period.
		return nil, nil, fmt.Errorf("Failed to initialize iptables executor due to: %s", err.Error())
	}

	// run through all network policies
	for _, policy := range networkPoliciesInfo {

		// ensure there is a unique chain per network policy in filter table
		policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)
		err := iptablesCmdHandler.NewChain("filter", policyChainName)
		// Exit status 1 from iptables means the chain already exists, which
		// is not an error here.
		if err != nil && err.(*iptables.Error).ExitStatus() != 1 {
			return nil, nil, fmt.Errorf("Failed to run iptables command: %s", err.Error())
		}

		activePolicyChains[policyChainName] = true

		currentPodIPs := make([]string, 0, len(policy.targetPods))
		for ip := range policy.targetPods {
			currentPodIPs = append(currentPodIPs, ip)
		}

		if policy.policyType == "both" || policy.policyType == "ingress" {
			// create a ipset for all destination pod ip's matched by the policy spec PodSelector
			targetDestPodIPSetName := policyDestinationPodIPSetName(policy.namespace, policy.name)
			targetDestPodIPSet, err := npc.ipSetHandler.Create(targetDestPodIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
			if err != nil {
				return nil, nil, fmt.Errorf("failed to create ipset: %s", err.Error())
			}
			// Refresh failures are logged, not fatal: a stale member list is
			// preferable to aborting the whole sync.
			err = targetDestPodIPSet.Refresh(currentPodIPs)
			if err != nil {
				glog.Errorf("failed to refresh targetDestPodIPSet,: " + err.Error())
			}
			err = npc.processIngressRules(policy, targetDestPodIPSetName, activePolicyIPSets, version)
			if err != nil {
				return nil, nil, err
			}
			activePolicyIPSets[targetDestPodIPSet.Name] = true
		}

		if policy.policyType == "both" || policy.policyType == "egress" {
			// create a ipset for all source pod ip's matched by the policy spec PodSelector
			targetSourcePodIPSetName := policySourcePodIPSetName(policy.namespace, policy.name)
			targetSourcePodIPSet, err := npc.ipSetHandler.Create(targetSourcePodIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
			if err != nil {
				return nil, nil, fmt.Errorf("failed to create ipset: %s", err.Error())
			}
			err = targetSourcePodIPSet.Refresh(currentPodIPs)
			if err != nil {
				glog.Errorf("failed to refresh targetSourcePodIPSet: " + err.Error())
			}
			err = npc.processEgressRules(policy, targetSourcePodIPSetName, activePolicyIPSets, version)
			if err != nil {
				return nil, nil, err
			}
			activePolicyIPSets[targetSourcePodIPSet.Name] = true
		}
	}

	glog.V(2).Infof("Iptables chains in the filter table are synchronized with the network policies.")

	return activePolicyChains, activePolicyIPSets, nil
}
// processIngressRules renders the ingress section of a single network policy
// into iptables rules (appended to the policy's filter-table chain) and the
// ipsets those rules match against. Every ipset created here is recorded in
// activePolicyIPSets so the caller can garbage-collect stale sets afterwards.
func (npc *NetworkPolicyController) processIngressRules(policy networkPolicyInfo,
	targetDestPodIPSetName string, activePolicyIPSets map[string]bool, version string) error {

	// From network policy spec: "If field 'Ingress' is empty then this NetworkPolicy does not allow any traffic "
	// so no whitelist rules to be added to the network policy
	if policy.ingressRules == nil {
		return nil
	}

	iptablesCmdHandler, err := iptables.New()
	if err != nil {
		return fmt.Errorf("Failed to initialize iptables executor due to: %s", err.Error())
	}

	policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)

	// run through all the ingress rules in the spec and create iptables rules
	// in the chain for the network policy
	for i, ingressRule := range policy.ingressRules {

		if len(ingressRule.srcPods) != 0 {
			// Build an ipset holding the IPs of all pods selected by this
			// rule's 'from' pod/namespace selectors.
			srcPodIPSetName := policyIndexedSourcePodIPSetName(policy.namespace, policy.name, i)
			srcPodIPSet, err := npc.ipSetHandler.Create(srcPodIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
			if err != nil {
				return fmt.Errorf("failed to create ipset: %s", err.Error())
			}

			activePolicyIPSets[srcPodIPSet.Name] = true

			ingressRuleSrcPodIPs := make([]string, 0, len(ingressRule.srcPods))
			for _, pod := range ingressRule.srcPods {
				ingressRuleSrcPodIPs = append(ingressRuleSrcPodIPs, pod.ip)
			}
			// Refresh failures are logged rather than returned: a stale
			// member list is preferable to aborting the whole sync.
			err = srcPodIPSet.Refresh(ingressRuleSrcPodIPs)
			if err != nil {
				glog.Errorf("failed to refresh srcPodIPSet: " + err.Error())
			}

			if len(ingressRule.ports) != 0 {
				// case where 'ports' details and 'from' details specified in the ingress rule
				// so match on specified source and destination ip's and specified port (if any) and protocol
				for _, portProtocol := range ingressRule.ports {
					comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
						policy.name + " namespace " + policy.namespace
					if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, srcPodIPSetName, targetDestPodIPSetName, portProtocol.protocol, portProtocol.port); err != nil {
						return err
					}
				}
			}

			if len(ingressRule.namedPorts) != 0 {
				// Named ports: each resolves to its own ipset of endpoint IPs
				// (the pods actually exposing that port name).
				for j, endPoints := range ingressRule.namedPorts {
					namedPortIPSetName := policyIndexedIngressNamedPortIPSetName(policy.namespace, policy.name, i, j)
					namedPortIPSet, err := npc.ipSetHandler.Create(namedPortIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
					if err != nil {
						return fmt.Errorf("failed to create ipset: %s", err.Error())
					}

					activePolicyIPSets[namedPortIPSet.Name] = true

					err = namedPortIPSet.Refresh(endPoints.ips)
					if err != nil {
						glog.Errorf("failed to refresh namedPortIPSet: " + err.Error())
					}
					comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
						policy.name + " namespace " + policy.namespace
					if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, srcPodIPSetName, namedPortIPSetName, endPoints.protocol, endPoints.port); err != nil {
						return err
					}
				}
			}

			if len(ingressRule.ports) == 0 && len(ingressRule.namedPorts) == 0 {
				// case where no 'ports' details specified in the ingress rule but 'from' details specified
				// so match on specified source and destination ip with all port and protocol
				comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
					policy.name + " namespace " + policy.namespace
				if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, srcPodIPSetName, targetDestPodIPSetName, "", ""); err != nil {
					return err
				}
			}
		}

		// case where only 'ports' details specified but no 'from' details in the ingress rule
		// so match on all sources, with specified port (if any) and protocol
		if ingressRule.matchAllSource && !ingressRule.matchAllPorts {
			for _, portProtocol := range ingressRule.ports {
				comment := "rule to ACCEPT traffic from all sources to dest pods selected by policy name: " +
					policy.name + " namespace " + policy.namespace
				if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, "", targetDestPodIPSetName, portProtocol.protocol, portProtocol.port); err != nil {
					return err
				}
			}

			for j, endPoints := range ingressRule.namedPorts {
				namedPortIPSetName := policyIndexedIngressNamedPortIPSetName(policy.namespace, policy.name, i, j)
				namedPortIPSet, err := npc.ipSetHandler.Create(namedPortIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
				if err != nil {
					return fmt.Errorf("failed to create ipset: %s", err.Error())
				}

				activePolicyIPSets[namedPortIPSet.Name] = true

				err = namedPortIPSet.Refresh(endPoints.ips)
				if err != nil {
					glog.Errorf("failed to refresh namedPortIPSet: " + err.Error())
				}

				comment := "rule to ACCEPT traffic from all sources to dest pods selected by policy name: " +
					policy.name + " namespace " + policy.namespace
				if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, "", namedPortIPSetName, endPoints.protocol, endPoints.port); err != nil {
					return err
				}
			}
		}

		// case where neither ports nor from details are specified in the ingress rule
		// so match on all ports, protocol, source IP's
		if ingressRule.matchAllSource && ingressRule.matchAllPorts {
			comment := "rule to ACCEPT traffic from all sources to dest pods selected by policy name: " +
				policy.name + " namespace " + policy.namespace
			if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, "", targetDestPodIPSetName, "", ""); err != nil {
				return err
			}
		}

		if len(ingressRule.srcIPBlocks) != 0 {
			// ipBlock peers: a hash:net ipset holds the allowed CIDRs (with
			// nomatch entries for any 'except' blocks).
			srcIPBlockIPSetName := policyIndexedSourceIPBlockIPSetName(policy.namespace, policy.name, i)
			srcIPBlockIPSet, err := npc.ipSetHandler.Create(srcIPBlockIPSetName, utils.TypeHashNet, utils.OptionTimeout, "0")
			if err != nil {
				return fmt.Errorf("failed to create ipset: %s", err.Error())
			}

			activePolicyIPSets[srcIPBlockIPSet.Name] = true

			err = srcIPBlockIPSet.RefreshWithBuiltinOptions(ingressRule.srcIPBlocks)
			if err != nil {
				glog.Errorf("failed to refresh srcIPBlockIPSet: " + err.Error())
			}

			if !ingressRule.matchAllPorts {
				for _, portProtocol := range ingressRule.ports {
					comment := "rule to ACCEPT traffic from specified ipBlocks to dest pods selected by policy name: " +
						policy.name + " namespace " + policy.namespace
					if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, srcIPBlockIPSetName, targetDestPodIPSetName, portProtocol.protocol, portProtocol.port); err != nil {
						return err
					}
				}

				for j, endPoints := range ingressRule.namedPorts {
					namedPortIPSetName := policyIndexedIngressNamedPortIPSetName(policy.namespace, policy.name, i, j)
					namedPortIPSet, err := npc.ipSetHandler.Create(namedPortIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
					if err != nil {
						return fmt.Errorf("failed to create ipset: %s", err.Error())
					}

					activePolicyIPSets[namedPortIPSet.Name] = true

					err = namedPortIPSet.Refresh(endPoints.ips)
					if err != nil {
						glog.Errorf("failed to refresh namedPortIPSet: " + err.Error())
					}
					comment := "rule to ACCEPT traffic from specified ipBlocks to dest pods selected by policy name: " +
						policy.name + " namespace " + policy.namespace
					if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, srcIPBlockIPSetName, namedPortIPSetName, endPoints.protocol, endPoints.port); err != nil {
						return err
					}
				}
			}
			if ingressRule.matchAllPorts {
				comment := "rule to ACCEPT traffic from specified ipBlocks to dest pods selected by policy name: " +
					policy.name + " namespace " + policy.namespace
				if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, srcIPBlockIPSetName, targetDestPodIPSetName, "", ""); err != nil {
					return err
				}
			}
		}
	}

	return nil
}
// processEgressRules renders the egress section of a single network policy
// into iptables rules (appended to the policy's filter-table chain) and the
// ipsets those rules match against. Every ipset created here is recorded in
// activePolicyIPSets so the caller can garbage-collect stale sets afterwards.
func (npc *NetworkPolicyController) processEgressRules(policy networkPolicyInfo,
	targetSourcePodIPSetName string, activePolicyIPSets map[string]bool, version string) error {

	// From network policy spec: "If field 'Egress' is empty then this NetworkPolicy does not allow any traffic "
	// so no whitelist rules to be added to the network policy
	if policy.egressRules == nil {
		return nil
	}

	iptablesCmdHandler, err := iptables.New()
	if err != nil {
		return fmt.Errorf("Failed to initialize iptables executor due to: %s", err.Error())
	}

	policyChainName := networkPolicyChainName(policy.namespace, policy.name, version)

	// run through all the egress rules in the spec and create iptables rules
	// in the chain for the network policy
	for i, egressRule := range policy.egressRules {

		if len(egressRule.dstPods) != 0 {
			// Build an ipset holding the IPs of all pods selected by this
			// rule's 'to' pod/namespace selectors.
			dstPodIPSetName := policyIndexedDestinationPodIPSetName(policy.namespace, policy.name, i)
			dstPodIPSet, err := npc.ipSetHandler.Create(dstPodIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
			if err != nil {
				return fmt.Errorf("failed to create ipset: %s", err.Error())
			}

			activePolicyIPSets[dstPodIPSet.Name] = true

			egressRuleDstPodIps := make([]string, 0, len(egressRule.dstPods))
			for _, pod := range egressRule.dstPods {
				egressRuleDstPodIps = append(egressRuleDstPodIps, pod.ip)
			}
			// Refresh failures are logged rather than returned: a stale
			// member list is preferable to aborting the whole sync.
			err = dstPodIPSet.Refresh(egressRuleDstPodIps)
			if err != nil {
				glog.Errorf("failed to refresh dstPodIPSet: " + err.Error())
			}

			if len(egressRule.ports) != 0 {
				// case where 'ports' details and 'to' details specified in the egress rule
				// so match on specified source and destination ip's and specified port (if any) and protocol
				for _, portProtocol := range egressRule.ports {
					comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
						policy.name + " namespace " + policy.namespace
					if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, targetSourcePodIPSetName, dstPodIPSetName, portProtocol.protocol, portProtocol.port); err != nil {
						return err
					}
				}
			}

			if len(egressRule.namedPorts) != 0 {
				// Named ports: each resolves to its own ipset of endpoint IPs
				// (the destination pods actually exposing that port name).
				for j, endPoints := range egressRule.namedPorts {
					namedPortIPSetName := policyIndexedEgressNamedPortIPSetName(policy.namespace, policy.name, i, j)
					namedPortIPSet, err := npc.ipSetHandler.Create(namedPortIPSetName, utils.TypeHashIP, utils.OptionTimeout, "0")
					if err != nil {
						return fmt.Errorf("failed to create ipset: %s", err.Error())
					}

					activePolicyIPSets[namedPortIPSet.Name] = true

					err = namedPortIPSet.Refresh(endPoints.ips)
					if err != nil {
						glog.Errorf("failed to refresh namedPortIPSet: " + err.Error())
					}
					comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
						policy.name + " namespace " + policy.namespace
					if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, targetSourcePodIPSetName, namedPortIPSetName, endPoints.protocol, endPoints.port); err != nil {
						return err
					}
				}
			}

			if len(egressRule.ports) == 0 && len(egressRule.namedPorts) == 0 {
				// case where no 'ports' details specified in the egress rule but 'to' details specified
				// so match on specified source and destination ip with all port and protocol
				comment := "rule to ACCEPT traffic from source pods to dest pods selected by policy name " +
					policy.name + " namespace " + policy.namespace
				if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, targetSourcePodIPSetName, dstPodIPSetName, "", ""); err != nil {
					return err
				}
			}
		}

		// case where only 'ports' details specified but no 'to' details in the egress rule
		// so match on all sources, with specified port (if any) and protocol
		if egressRule.matchAllDestinations && !egressRule.matchAllPorts {
			for _, portProtocol := range egressRule.ports {
				comment := "rule to ACCEPT traffic from source pods to all destinations selected by policy name: " +
					policy.name + " namespace " + policy.namespace
				if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, targetSourcePodIPSetName, "", portProtocol.protocol, portProtocol.port); err != nil {
					return err
				}
			}
		}

		// case where neither ports nor to details are specified in the egress rule
		// so match on all ports, protocol, source IP's
		if egressRule.matchAllDestinations && egressRule.matchAllPorts {
			comment := "rule to ACCEPT traffic from source pods to all destinations selected by policy name: " +
				policy.name + " namespace " + policy.namespace
			if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, targetSourcePodIPSetName, "", "", ""); err != nil {
				return err
			}
		}

		if len(egressRule.dstIPBlocks) != 0 {
			// ipBlock peers: a hash:net ipset holds the allowed CIDRs (with
			// nomatch entries for any 'except' blocks).
			dstIPBlockIPSetName := policyIndexedDestinationIPBlockIPSetName(policy.namespace, policy.name, i)
			dstIPBlockIPSet, err := npc.ipSetHandler.Create(dstIPBlockIPSetName, utils.TypeHashNet, utils.OptionTimeout, "0")
			if err != nil {
				return fmt.Errorf("failed to create ipset: %s", err.Error())
			}

			activePolicyIPSets[dstIPBlockIPSet.Name] = true

			err = dstIPBlockIPSet.RefreshWithBuiltinOptions(egressRule.dstIPBlocks)
			if err != nil {
				glog.Errorf("failed to refresh dstIPBlockIPSet: " + err.Error())
			}
			if !egressRule.matchAllPorts {
				for _, portProtocol := range egressRule.ports {
					comment := "rule to ACCEPT traffic from source pods to specified ipBlocks selected by policy name: " +
						policy.name + " namespace " + policy.namespace
					if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, targetSourcePodIPSetName, dstIPBlockIPSetName, portProtocol.protocol, portProtocol.port); err != nil {
						return err
					}
				}
			}
			if egressRule.matchAllPorts {
				comment := "rule to ACCEPT traffic from source pods to specified ipBlocks selected by policy name: " +
					policy.name + " namespace " + policy.namespace
				if err := npc.appendRuleToPolicyChain(iptablesCmdHandler, policyChainName, comment, targetSourcePodIPSetName, dstIPBlockIPSetName, "", ""); err != nil {
					return err
				}
			}
		}
	}

	return nil
}
// appendRuleToPolicyChain appends a pair of rules to the given policy chain:
// one that MARKs matching traffic (0x10000) and one that RETURNs marked
// traffic. Empty comment/ipset/protocol/port arguments simply omit the
// corresponding match.
func (npc *NetworkPolicyController) appendRuleToPolicyChain(iptablesCmdHandler *iptables.IPTables, policyChainName, comment, srcIPSetName, dstIPSetName, protocol, dPort string) error {
	if iptablesCmdHandler == nil {
		return fmt.Errorf("Failed to run iptables command: iptablesCmdHandler is nil")
	}
	args := make([]string, 0)
	if comment != "" {
		args = append(args, "-m", "comment", "--comment", comment)
	}
	if srcIPSetName != "" {
		args = append(args, "-m", "set", "--match-set", srcIPSetName, "src")
	}
	if dstIPSetName != "" {
		args = append(args, "-m", "set", "--match-set", dstIPSetName, "dst")
	}
	if protocol != "" {
		args = append(args, "-p", protocol)
	}
	if dPort != "" {
		args = append(args, "--dport", dPort)
	}

	// Build each rule's argument list in its own slice. Appending to `args`
	// twice could make markArgs and returnArgs share a backing array (when
	// args has spare capacity), letting the second append clobber the tail
	// of the first.
	markComment := "rule to mark traffic matching a network policy"
	markArgs := make([]string, 0, len(args)+8)
	markArgs = append(markArgs, args...)
	markArgs = append(markArgs, "-j", "MARK", "-m", "comment", "--comment", markComment, "--set-xmark", "0x10000/0x10000")
	err := iptablesCmdHandler.AppendUnique("filter", policyChainName, markArgs...)
	if err != nil {
		return fmt.Errorf("Failed to run iptables command: %s", err.Error())
	}

	returnComment := "rule to RETURN traffic matching a network policy"
	returnArgs := make([]string, 0, len(args)+9)
	returnArgs = append(returnArgs, args...)
	returnArgs = append(returnArgs, "-m", "comment", "--comment", returnComment, "-m", "mark", "--mark", "0x10000/0x10000", "-j", "RETURN")
	err = iptablesCmdHandler.AppendUnique("filter", policyChainName, returnArgs...)
	if err != nil {
		return fmt.Errorf("Failed to run iptables command: %s", err.Error())
	}

	return nil
}
// buildNetworkPoliciesInfo reads every NetworkPolicy from the informer cache
// and converts it into the controller's internal networkPolicyInfo form:
// resolved target pods, effective policy type, and fully-evaluated
// ingress/egress rules (peer pods, ipBlocks, numeric and named ports).
func (npc *NetworkPolicyController) buildNetworkPoliciesInfo() ([]networkPolicyInfo, error) {

	NetworkPolicies := make([]networkPolicyInfo, 0)

	for _, policyObj := range npc.npLister.List() {

		policy, ok := policyObj.(*networking.NetworkPolicy)
		if !ok {
			return nil, fmt.Errorf("Failed to convert")
		}
		// Derive the selector only after the type assertion has been checked:
		// taking &policy.Spec.PodSelector on a failed assertion would
		// dereference a nil pointer before the !ok branch could fire.
		podSelector, _ := v1.LabelSelectorAsSelector(&policy.Spec.PodSelector)
		newPolicy := networkPolicyInfo{
			name:        policy.Name,
			namespace:   policy.Namespace,
			podSelector: podSelector,
			policyType:  "ingress",
		}

		// Determine the effective policy type; "ingress" is the default when
		// PolicyTypes is unset.
		ingressType, egressType := false, false
		for _, policyType := range policy.Spec.PolicyTypes {
			if policyType == networking.PolicyTypeIngress {
				ingressType = true
			}
			if policyType == networking.PolicyTypeEgress {
				egressType = true
			}
		}
		if ingressType && egressType {
			newPolicy.policyType = "both"
		} else if egressType {
			newPolicy.policyType = "egress"
		} else if ingressType {
			newPolicy.policyType = "ingress"
		}

		matchingPods, err := npc.ListPodsByNamespaceAndLabels(policy.Namespace, podSelector)
		newPolicy.targetPods = make(map[string]podInfo)
		namedPort2IngressEps := make(namedPort2eps)
		// NOTE(review): a pod-listing error is silently skipped here (the
		// policy is kept with no target pods); confirm this best-effort
		// behavior is intended before changing it.
		if err == nil {
			for _, matchingPod := range matchingPods {
				// Pods without an assigned IP cannot be matched by ipsets.
				if matchingPod.Status.PodIP == "" {
					continue
				}
				newPolicy.targetPods[matchingPod.Status.PodIP] = podInfo{ip: matchingPod.Status.PodIP,
					name:      matchingPod.ObjectMeta.Name,
					namespace: matchingPod.ObjectMeta.Namespace,
					labels:    matchingPod.ObjectMeta.Labels}
				npc.grabNamedPortFromPod(matchingPod, &namedPort2IngressEps)
			}
		}

		// Preserve the nil-vs-empty distinction: nil means the policy has no
		// ingress (resp. egress) section at all, which downstream treats as
		// "allow no traffic".
		if policy.Spec.Ingress == nil {
			newPolicy.ingressRules = nil
		} else {
			newPolicy.ingressRules = make([]ingressRule, 0)
		}

		if policy.Spec.Egress == nil {
			newPolicy.egressRules = nil
		} else {
			newPolicy.egressRules = make([]egressRule, 0)
		}

		for _, specIngressRule := range policy.Spec.Ingress {
			ingressRule := ingressRule{}
			ingressRule.srcPods = make([]podInfo, 0)
			ingressRule.srcIPBlocks = make([][]string, 0)

			// If this field is empty or missing in the spec, this rule matches all sources
			if len(specIngressRule.From) == 0 {
				ingressRule.matchAllSource = true
			} else {
				ingressRule.matchAllSource = false
				for _, peer := range specIngressRule.From {
					if peerPods, err := npc.evalPodPeer(policy, peer); err == nil {
						for _, peerPod := range peerPods {
							if peerPod.Status.PodIP == "" {
								continue
							}
							ingressRule.srcPods = append(ingressRule.srcPods,
								podInfo{ip: peerPod.Status.PodIP,
									name:      peerPod.ObjectMeta.Name,
									namespace: peerPod.ObjectMeta.Namespace,
									labels:    peerPod.ObjectMeta.Labels})
						}
					}
					ingressRule.srcIPBlocks = append(ingressRule.srcIPBlocks, npc.evalIPBlockPeer(peer)...)
				}
			}

			ingressRule.ports = make([]protocolAndPort, 0)
			ingressRule.namedPorts = make([]endPoints, 0)
			// If this field is empty or missing in the spec, this rule matches all ports
			if len(specIngressRule.Ports) == 0 {
				ingressRule.matchAllPorts = true
			} else {
				ingressRule.matchAllPorts = false
				ingressRule.ports, ingressRule.namedPorts = npc.processNetworkPolicyPorts(specIngressRule.Ports, namedPort2IngressEps)
			}

			newPolicy.ingressRules = append(newPolicy.ingressRules, ingressRule)
		}

		for _, specEgressRule := range policy.Spec.Egress {
			egressRule := egressRule{}
			egressRule.dstPods = make([]podInfo, 0)
			egressRule.dstIPBlocks = make([][]string, 0)
			namedPort2EgressEps := make(namedPort2eps)

			// If this field is empty or missing in the spec, this rule matches all destinations
			if len(specEgressRule.To) == 0 {
				egressRule.matchAllDestinations = true
			} else {
				egressRule.matchAllDestinations = false
				for _, peer := range specEgressRule.To {
					if peerPods, err := npc.evalPodPeer(policy, peer); err == nil {
						for _, peerPod := range peerPods {
							if peerPod.Status.PodIP == "" {
								continue
							}
							egressRule.dstPods = append(egressRule.dstPods,
								podInfo{ip: peerPod.Status.PodIP,
									name:      peerPod.ObjectMeta.Name,
									namespace: peerPod.ObjectMeta.Namespace,
									labels:    peerPod.ObjectMeta.Labels})
							// Egress named ports resolve against the peer
							// (destination) pods, not the policy's own pods.
							npc.grabNamedPortFromPod(peerPod, &namedPort2EgressEps)
						}
					}
					egressRule.dstIPBlocks = append(egressRule.dstIPBlocks, npc.evalIPBlockPeer(peer)...)
				}
			}

			egressRule.ports = make([]protocolAndPort, 0)
			egressRule.namedPorts = make([]endPoints, 0)
			// If this field is empty or missing in the spec, this rule matches all ports
			if len(specEgressRule.Ports) == 0 {
				egressRule.matchAllPorts = true
			} else {
				egressRule.matchAllPorts = false
				egressRule.ports, egressRule.namedPorts = npc.processNetworkPolicyPorts(specEgressRule.Ports, namedPort2EgressEps)
			}

			newPolicy.egressRules = append(newPolicy.egressRules, egressRule)
		}
		NetworkPolicies = append(NetworkPolicies, newPolicy)
	}

	return NetworkPolicies, nil
}
// evalPodPeer resolves a NetworkPolicyPeer's selectors into the concrete list
// of pods it matches. A peer with a NamespaceSelector is evaluated across
// every matching namespace (optionally narrowed by its PodSelector, which the
// spec allows to be combined with the NamespaceSelector); a peer with only a
// PodSelector is evaluated in the policy's own namespace. A pure ipBlock peer
// yields an empty list.
func (npc *NetworkPolicyController) evalPodPeer(policy *networking.NetworkPolicy, peer networking.NetworkPolicyPeer) ([]*api.Pod, error) {
	matchingPods := make([]*api.Pod, 0)

	switch {
	case peer.NamespaceSelector != nil:
		nsSelector, _ := v1.LabelSelectorAsSelector(peer.NamespaceSelector)
		namespaces, err := npc.ListNamespaceByLabels(nsSelector)
		if err != nil {
			return nil, errors.New("Failed to build network policies info due to " + err.Error())
		}

		podSelector := labels.Everything()
		if peer.PodSelector != nil {
			podSelector, _ = v1.LabelSelectorAsSelector(peer.PodSelector)
		}
		for _, namespace := range namespaces {
			namespacePods, err := npc.ListPodsByNamespaceAndLabels(namespace.Name, podSelector)
			if err != nil {
				return nil, errors.New("Failed to build network policies info due to " + err.Error())
			}
			matchingPods = append(matchingPods, namespacePods...)
		}
	case peer.PodSelector != nil:
		podSelector, _ := v1.LabelSelectorAsSelector(peer.PodSelector)
		pods, err := npc.ListPodsByNamespaceAndLabels(policy.Namespace, podSelector)
		if err != nil {
			return pods, err
		}
		matchingPods = pods
	}

	return matchingPods, nil
}
// processNetworkPolicyPorts splits the ports of a policy rule into numeric
// ports (matched directly by iptables) and named ports (resolved to the
// endpoints recorded in namedPort2eps). Named ports with no known endpoints
// are dropped.
func (npc *NetworkPolicyController) processNetworkPolicyPorts(npPorts []networking.NetworkPolicyPort, namedPort2eps namedPort2eps) (numericPorts []protocolAndPort, namedPorts []endPoints) {
	numericPorts, namedPorts = make([]protocolAndPort, 0), make([]endPoints, 0)
	for _, npPort := range npPorts {
		// Protocol is optional in the NetworkPolicyPort API and defaults to
		// TCP; guard the dereference so an un-defaulted object cannot panic
		// the controller.
		protocol := string(api.ProtocolTCP)
		if npPort.Protocol != nil {
			protocol = string(*npPort.Protocol)
		}
		if npPort.Port == nil {
			// No port: match the protocol on any port.
			numericPorts = append(numericPorts, protocolAndPort{port: "", protocol: protocol})
		} else if npPort.Port.Type == intstr.Int {
			numericPorts = append(numericPorts, protocolAndPort{port: npPort.Port.String(), protocol: protocol})
		} else {
			// Named port: expand to every (numeric port, endpoint IPs) pair
			// previously recorded for that name and protocol.
			if protocol2eps, ok := namedPort2eps[npPort.Port.String()]; ok {
				if numericPort2eps, ok := protocol2eps[protocol]; ok {
					for _, eps := range numericPort2eps {
						namedPorts = append(namedPorts, *eps)
					}
				}
			}
		}
	}
	return
}
// ListPodsByNamespaceAndLabels returns all cached pods in the given namespace
// that match podSelector.
func (npc *NetworkPolicyController) ListPodsByNamespaceAndLabels(namespace string, podSelector labels.Selector) (ret []*api.Pod, err error) {
	matched, err := listers.NewPodLister(npc.podLister).Pods(namespace).List(podSelector)
	if err != nil {
		return nil, err
	}
	return matched, nil
}
// ListNamespaceByLabels returns all cached namespaces matching
// namespaceSelector.
func (npc *NetworkPolicyController) ListNamespaceByLabels(namespaceSelector labels.Selector) ([]*api.Namespace, error) {
	matched, err := listers.NewNamespaceLister(npc.nsLister).List(namespaceSelector)
	if err != nil {
		return nil, err
	}
	return matched, nil
}
// evalIPBlockPeer translates a peer's ipBlock into ipset member entries, but
// only when the ipBlock is the peer's sole selector (per the API, ipBlock is
// mutually exclusive with the pod/namespace selectors). A /0 CIDR is split
// into the two /1 halves because ipset cannot represent a zero-prefix
// network; 'except' CIDRs are added with the nomatch option.
func (npc *NetworkPolicyController) evalIPBlockPeer(peer networking.NetworkPolicyPeer) [][]string {
	ipBlock := make([][]string, 0)
	if peer.PodSelector != nil || peer.NamespaceSelector != nil || peer.IPBlock == nil {
		return ipBlock
	}

	cidr := peer.IPBlock.CIDR
	if strings.HasSuffix(cidr, "/0") {
		ipBlock = append(ipBlock, []string{"0.0.0.0/1", utils.OptionTimeout, "0"}, []string{"128.0.0.0/1", utils.OptionTimeout, "0"})
	} else {
		ipBlock = append(ipBlock, []string{cidr, utils.OptionTimeout, "0"})
	}

	for _, except := range peer.IPBlock.Except {
		if strings.HasSuffix(except, "/0") {
			ipBlock = append(ipBlock, []string{"0.0.0.0/1", utils.OptionTimeout, "0", utils.OptionNoMatch}, []string{"128.0.0.0/1", utils.OptionTimeout, "0", utils.OptionNoMatch})
		} else {
			ipBlock = append(ipBlock, []string{except, utils.OptionTimeout, "0", utils.OptionNoMatch})
		}
	}
	return ipBlock
}
// grabNamedPortFromPod records the pod's IP under
// namedPort2eps[name][protocol][containerPort] for every named container port
// on the pod, so that named ports in policy rules can later be resolved to
// concrete endpoint IPs.
func (npc *NetworkPolicyController) grabNamedPortFromPod(pod *api.Pod, namedPort2eps *namedPort2eps) {
	if pod == nil || namedPort2eps == nil {
		return
	}
	for _, container := range pod.Spec.Containers {
		for _, port := range container.Ports {
			portName := port.Name
			proto := string(port.Protocol)
			portNum := strconv.Itoa(int(port.ContainerPort))

			// Lazily create the nested maps for this name/protocol pair.
			if (*namedPort2eps)[portName] == nil {
				(*namedPort2eps)[portName] = make(protocol2eps)
			}
			if (*namedPort2eps)[portName][proto] == nil {
				(*namedPort2eps)[portName][proto] = make(numericPort2eps)
			}

			if eps, ok := (*namedPort2eps)[portName][proto][portNum]; ok {
				eps.ips = append(eps.ips, pod.Status.PodIP)
			} else {
				(*namedPort2eps)[portName][proto][portNum] = &endPoints{
					ips:             []string{pod.Status.PodIP},
					protocolAndPort: protocolAndPort{port: portNum, protocol: proto},
				}
			}
		}
	}
}
// networkPolicyChainName returns the deterministic per-policy iptables chain
// name, derived from a hash of the namespace, policy name, and sync version.
func networkPolicyChainName(namespace, policyName string, version string) string {
	checksum := sha256.Sum256([]byte(namespace + policyName + version))
	return kubeNetworkPolicyChainPrefix + base32.StdEncoding.EncodeToString(checksum[:])[:16]
}
// policySourcePodIPSetName returns the name of the ipset holding the source
// pod IPs selected by the policy's podSelector (used by egress rules).
func policySourcePodIPSetName(namespace, policyName string) string {
	checksum := sha256.Sum256([]byte(namespace + policyName))
	return kubeSourceIPSetPrefix + base32.StdEncoding.EncodeToString(checksum[:])[:16]
}
func policyDestinationPodIPSetName(namespace, policyName string) string {
hash := sha256.Sum256([]byte(namespace + policyName))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return kubeDestinationIPSetPrefix + encoded[:16]
}
func policyIndexedSourcePodIPSetName(namespace, policyName string, ingressRuleNo int) string {
hash := sha256.Sum256([]byte(namespace + policyName + "ingressrule" + strconv.Itoa(ingressRuleNo) + "pod"))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return kubeSourceIPSetPrefix + encoded[:16]
}
func policyIndexedDestinationPodIPSetName(namespace, policyName string, egressRuleNo int) string {
hash := sha256.Sum256([]byte(namespace + policyName + "egressrule" + strconv.Itoa(egressRuleNo) + "pod"))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return kubeDestinationIPSetPrefix + encoded[:16]
}
func policyIndexedSourceIPBlockIPSetName(namespace, policyName string, ingressRuleNo int) string {
hash := sha256.Sum256([]byte(namespace + policyName + "ingressrule" + strconv.Itoa(ingressRuleNo) + "ipblock"))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return kubeSourceIPSetPrefix + encoded[:16]
}
func policyIndexedDestinationIPBlockIPSetName(namespace, policyName string, egressRuleNo int) string {
hash := sha256.Sum256([]byte(namespace + policyName + "egressrule" + strconv.Itoa(egressRuleNo) + "ipblock"))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return kubeDestinationIPSetPrefix + encoded[:16]
}
func policyIndexedIngressNamedPortIPSetName(namespace, policyName string, ingressRuleNo, namedPortNo int) string {
hash := sha256.Sum256([]byte(namespace + policyName + "ingressrule" + strconv.Itoa(ingressRuleNo) + strconv.Itoa(namedPortNo) + "namedport"))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return kubeDestinationIPSetPrefix + encoded[:16]
}
func policyIndexedEgressNamedPortIPSetName(namespace, policyName string, egressRuleNo, namedPortNo int) string {
hash := sha256.Sum256([]byte(namespace + policyName + "egressrule" + strconv.Itoa(egressRuleNo) + strconv.Itoa(namedPortNo) + "namedport"))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return kubeDestinationIPSetPrefix + encoded[:16]
}

View File

@ -1,21 +1,11 @@
// Apache License v2.0 (copyright Cloud Native Labs & Rancher Labs)
// - modified from https://github.com/cloudnativelabs/kube-router/tree/d6f9f31a7b/pkg/utils
// +build !windows
package netpol
package utils
import (
"bytes"
"errors"
"fmt"
"net"
"os/exec"
"strings"
"time"
apiv1 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
)
var (
@ -31,7 +21,7 @@ const (
// DefaultMaxElem Default OptionMaxElem value.
DefaultMaxElem = "65536"
// DefaultHasSize Default OptionHashSize value.
// DefaultHasSize Defaul OptionHashSize value.
DefaultHasSize = "1024"
// TypeHashIP The hash:ip set type uses a hash to store IP host addresses (default) or network addresses. Zero valued IP address cannot be stored in a hash:ip type of set.
@ -96,7 +86,7 @@ type IPSet struct {
isIpv6 bool
}
// Set represent a ipset set entry.
// Set reprensent a ipset set entry.
type Set struct {
Parent *IPSet
Name string
@ -156,8 +146,8 @@ func (ipset *IPSet) runWithStdin(stdin *bytes.Buffer, args ...string) (string, e
return stdout.String(), nil
}
// NewSavedIPSet create a new IPSet with ipSetPath initialized.
func NewSavedIPSet(isIpv6 bool) (*IPSet, error) {
// NewIPSet create a new IPSet with ipSetPath initialized.
func NewIPSet(isIpv6 bool) (*IPSet, error) {
ipSetPath, err := getIPSetPath()
if err != nil {
return nil, err
@ -167,9 +157,6 @@ func NewSavedIPSet(isIpv6 bool) (*IPSet, error) {
Sets: make(map[string]*Set),
isIpv6: isIpv6,
}
if err := ipSet.Save(); err != nil {
return nil, err
}
return ipSet, nil
}
@ -221,11 +208,14 @@ func (ipset *IPSet) Add(set *Set) error {
return err
}
for _, entry := range set.Entries {
_, err := ipset.Get(set.Name).Add(entry.Options...)
if err != nil {
return err
}
options := make([][]string, len(set.Entries))
for index, entry := range set.Entries {
options[index] = entry.Options
}
err = ipset.Get(set.Name).BatchAdd(options)
if err != nil {
return err
}
return nil
@ -233,6 +223,8 @@ func (ipset *IPSet) Add(set *Set) error {
// Add a given entry to the set. If the -exist option is specified, ipset
// ignores if the entry already added to the set.
// Note: if you need to add multiple entries (e.g., in a loop), use BatchAdd instead,
// as its much more performant.
func (set *Set) Add(addOptions ...string) (*Entry, error) {
entry := &Entry{
Set: set,
@ -246,6 +238,35 @@ func (set *Set) Add(addOptions ...string) (*Entry, error) {
return entry, nil
}
// Adds given entries (with their options) to the set.
// For multiple items, this is much faster than Add().
func (set *Set) BatchAdd(addOptions [][]string) error {
newEntries := make([]*Entry, len(addOptions))
for index, options := range addOptions {
entry := &Entry{
Set: set,
Options: options,
}
newEntries[index] = entry
}
set.Entries = append(set.Entries, newEntries...)
// Build the `restore` command contents
var builder strings.Builder
for _, options := range addOptions {
line := strings.Join(append([]string{"add", "-exist", set.name()}, options...), " ")
builder.WriteString(line + "\n")
}
restoreContents := builder.String()
// Invoke the command
_, err := set.Parent.runWithStdin(bytes.NewBufferString(restoreContents), "restore")
if err != nil {
return err
}
return nil
}
// Del an entry from a set. If the -exist option is specified and the entry is
// not in the set (maybe already expired), then the command is ignored.
func (entry *Entry) Del() error {
@ -253,11 +274,14 @@ func (entry *Entry) Del() error {
if err != nil {
return err
}
return entry.Set.Parent.Save()
err = entry.Set.Parent.Save()
if err != nil {
return err
}
return nil
}
// Test whether an entry is in a set or not. Exit status number is zero if the
// Test wether an entry is in a set or not. Exit status number is zero if the
// tested entry is in the set and nonzero if it is missing from the set.
func (set *Set) Test(testOptions ...string) (bool, error) {
_, err := set.Parent.run(append([]string{"test", set.name()}, testOptions...)...)
@ -388,7 +412,7 @@ func (ipset *IPSet) Save() error {
// stdin. Please note, existing sets and elements are not erased by restore
// unless specified so in the restore file. All commands are allowed in restore
// mode except list, help, version, interactive mode and restore itself.
// Send formatted ipset.sets into stdin of "ipset restore" command.
// Send formated ipset.sets into stdin of "ipset restore" command.
func (ipset *IPSet) Restore() error {
stdin := bytes.NewBufferString(buildIPSetRestore(ipset))
_, err := ipset.runWithStdin(stdin, "restore", "-exist")
@ -451,7 +475,19 @@ func (set *Set) Swap(setTo *Set) error {
// Refresh a Set with new entries.
func (set *Set) Refresh(entries []string, extraOptions ...string) error {
entriesWithOptions := make([][]string, len(entries))
for index, entry := range entries {
entriesWithOptions[index] = append([]string{entry}, extraOptions...)
}
return set.RefreshWithBuiltinOptions(entriesWithOptions)
}
// Refresh a Set with new entries with built-in options.
func (set *Set) RefreshWithBuiltinOptions(entries [][]string) error {
var err error
// The set-name must be < 32 characters!
tempName := set.Name + "-"
@ -466,11 +502,9 @@ func (set *Set) Refresh(entries []string, extraOptions ...string) error {
return err
}
for _, entry := range entries {
_, err = newSet.Add(entry)
if err != nil {
return err
}
err = newSet.BatchAdd(entries)
if err != nil {
return err
}
err = set.Swap(newSet)
@ -485,73 +519,3 @@ func (set *Set) Refresh(entries []string, extraOptions ...string) error {
return nil
}
// Refresh a Set with new entries with built-in options.
func (set *Set) RefreshWithBuiltinOptions(entries [][]string) error {
var err error
tempName := set.Name + "-temp"
newSet := &Set{
Parent: set.Parent,
Name: tempName,
Options: set.Options,
}
err = set.Parent.Add(newSet)
if err != nil {
return err
}
for _, entry := range entries {
_, err = newSet.Add(entry...)
if err != nil {
return err
}
}
err = set.Swap(newSet)
if err != nil {
return err
}
err = set.Parent.Destroy(tempName)
if err != nil {
return err
}
return nil
}
// GetNodeIP returns the most valid external facing IP address for a node.
// Order of preference:
// 1. NodeInternalIP
// 2. NodeExternalIP (Only set on cloud providers usually)
func GetNodeIP(node *apiv1.Node) (net.IP, error) {
addresses := node.Status.Addresses
addressMap := make(map[apiv1.NodeAddressType][]apiv1.NodeAddress)
for i := range addresses {
addressMap[addresses[i].Type] = append(addressMap[addresses[i].Type], addresses[i])
}
if addresses, ok := addressMap[apiv1.NodeInternalIP]; ok {
return net.ParseIP(addresses[0].Address), nil
}
if addresses, ok := addressMap[apiv1.NodeExternalIP]; ok {
return net.ParseIP(addresses[0].Address), nil
}
return nil, errors.New("host IP unknown")
}
// CacheSync performs cache synchronization under timeout limit
func CacheSyncOrTimeout(informerFactory informers.SharedInformerFactory, stopCh <-chan struct{}, cacheSyncTimeout time.Duration) error {
syncOverCh := make(chan struct{})
go func() {
informerFactory.WaitForCacheSync(stopCh)
close(syncOverCh)
}()
select {
case <-time.After(cacheSyncTimeout):
return errors.New(cacheSyncTimeout.String() + " timeout")
case <-syncOverCh:
return nil
}
}

View File

@ -0,0 +1,61 @@
package utils
import (
"context"
"errors"
"fmt"
"net"
"os"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// GetNodeObject returns the node API object for the node
func GetNodeObject(clientset kubernetes.Interface, hostnameOverride string) (*apiv1.Node, error) {
// assuming kube-router is running as pod, first check env NODE_NAME
nodeName := os.Getenv("NODE_NAME")
if nodeName != "" {
node, err := clientset.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{})
if err == nil {
return node, nil
}
}
// if env NODE_NAME is not set then check if node is register with hostname
hostName, _ := os.Hostname()
node, err := clientset.CoreV1().Nodes().Get(context.Background(), hostName, metav1.GetOptions{})
if err == nil {
return node, nil
}
// if env NODE_NAME is not set and node is not registered with hostname, then use host name override
if hostnameOverride != "" {
node, err = clientset.CoreV1().Nodes().Get(context.Background(), hostnameOverride, metav1.GetOptions{})
if err == nil {
return node, nil
}
}
return nil, fmt.Errorf("Failed to identify the node by NODE_NAME, hostname or --hostname-override")
}
// GetNodeIP returns the most valid external facing IP address for a node.
// Order of preference:
// 1. NodeInternalIP
// 2. NodeExternalIP (Only set on cloud providers usually)
func GetNodeIP(node *apiv1.Node) (net.IP, error) {
addresses := node.Status.Addresses
addressMap := make(map[apiv1.NodeAddressType][]apiv1.NodeAddress)
for i := range addresses {
addressMap[addresses[i].Type] = append(addressMap[addresses[i].Type], addresses[i])
}
if addresses, ok := addressMap[apiv1.NodeInternalIP]; ok {
return net.ParseIP(addresses[0].Address), nil
}
if addresses, ok := addressMap[apiv1.NodeExternalIP]; ok {
return net.ParseIP(addresses[0].Address), nil
}
return nil, errors.New("host IP unknown")
}